1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 * Copyright 2015 Joyent, Inc.
27 */
28
29/* Portions Copyright 2007 Jeremy Teo */
30/* Portions Copyright 2010 Robert Milkowski */
31
32#include <sys/types.h>
33#include <sys/param.h>
34#include <sys/time.h>
35#include <sys/systm.h>
36#include <sys/sysmacros.h>
37#include <sys/resource.h>
38#include <sys/vfs.h>
39#include <sys/vfs_opreg.h>
40#include <sys/vnode.h>
41#include <sys/file.h>
42#include <sys/stat.h>
43#include <sys/kmem.h>
44#include <sys/taskq.h>
45#include <sys/uio.h>
46#include <sys/vmsystm.h>
47#include <sys/atomic.h>
48#include <sys/vm.h>
49#include <vm/seg_vn.h>
50#include <vm/pvn.h>
51#include <vm/as.h>
52#include <vm/kpm.h>
53#include <vm/seg_kpm.h>
54#include <sys/mman.h>
55#include <sys/pathname.h>
56#include <sys/cmn_err.h>
57#include <sys/errno.h>
58#include <sys/unistd.h>
59#include <sys/zfs_dir.h>
60#include <sys/zfs_acl.h>
61#include <sys/zfs_ioctl.h>
62#include <sys/fs/zfs.h>
63#include <sys/dmu.h>
64#include <sys/dmu_objset.h>
65#include <sys/spa.h>
66#include <sys/txg.h>
67#include <sys/dbuf.h>
68#include <sys/zap.h>
69#include <sys/sa.h>
70#include <sys/dirent.h>
71#include <sys/policy.h>
72#include <sys/sunddi.h>
73#include <sys/filio.h>
74#include <sys/sid.h>
75#include "fs/fs_subr.h"
76#include <sys/zfs_ctldir.h>
77#include <sys/zfs_fuid.h>
78#include <sys/zfs_sa.h>
79#include <sys/dnlc.h>
80#include <sys/zfs_rlock.h>
81#include <sys/extdirent.h>
82#include <sys/kidmap.h>
83#include <sys/cred.h>
84#include <sys/attr.h>
85
86/*
87 * Programming rules.
88 *
89 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
90 * properly lock its in-core state, create a DMU transaction, do the work,
91 * record this work in the intent log (ZIL), commit the DMU transaction,
92 * and wait for the intent log to commit if it is a synchronous operation.
93 * Moreover, the vnode ops must work in both normal and log replay context.
94 * The ordering of events is important to avoid deadlocks and references
95 * to freed memory.  The example below illustrates the following Big Rules:
96 *
97 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done, avoiding races, using ZFS_ENTER(zfsvfs).
99 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
100 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
101 *	can return EIO from the calling function.
102 *
103 *  (2)	VN_RELE() should always be the last thing except for zil_commit()
104 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
105 *	First, if it's the last reference, the vnode/znode
106 *	can be freed, so the zp may point to freed memory.  Second, the last
107 *	reference will call zfs_zinactive(), which may induce a lot of work --
108 *	pushing cached pages (which acquires range locks) and syncing out
109 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
110 *	which could deadlock the system if you were already holding one.
111 *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
112 *
113 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
114 *	as they can span dmu_tx_assign() calls.
115 *
116 *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
117 *      dmu_tx_assign().  This is critical because we don't want to block
118 *      while holding locks.
119 *
120 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
121 *	reduces lock contention and CPU usage when we must wait (note that if
122 *	throughput is constrained by the storage, nearly every transaction
123 *	must wait).
124 *
125 *      Note, in particular, that if a lock is sometimes acquired before
126 *      the tx assigns, and sometimes after (e.g. z_lock), then failing
127 *      to use a non-blocking assign can deadlock the system.  The scenario:
128 *
129 *	Thread A has grabbed a lock before calling dmu_tx_assign().
130 *	Thread B is in an already-assigned tx, and blocks for this lock.
131 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
132 *	forever, because the previous txg can't quiesce until B's tx commits.
133 *
134 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
135 *	then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
136 *	calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT,
137 *	to indicate that this operation has already called dmu_tx_wait().
138 *	This will ensure that we don't retry forever, waiting a short bit
139 *	each time.
140 *
141 *  (5)	If the operation succeeded, generate the intent log entry for it
142 *	before dropping locks.  This ensures that the ordering of events
143 *	in the intent log matches the order in which they actually occurred.
144 *	During ZIL replay the zfs_log_* functions will update the sequence
145 *	number to indicate the zil transaction has replayed.
146 *
147 *  (6)	At the end of each vnode op, the DMU tx must always commit,
148 *	regardless of whether there were any errors.
149 *
150 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
151 *	to ensure that synchronous semantics are provided when necessary.
152 *
153 * In general, this is how things should be ordered in each vnode op:
154 *
155 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
156 * top:
157 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
158 *	rw_enter(...);			// grab any other locks you need
159 *	tx = dmu_tx_create(...);	// get DMU tx
160 *	dmu_tx_hold_*();		// hold each object you might modify
161 *	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
162 *	if (error) {
163 *		rw_exit(...);		// drop locks
164 *		zfs_dirent_unlock(dl);	// unlock directory entry
165 *		VN_RELE(...);		// release held vnodes
166 *		if (error == ERESTART) {
167 *			waited = B_TRUE;
168 *			dmu_tx_wait(tx);
169 *			dmu_tx_abort(tx);
170 *			goto top;
171 *		}
172 *		dmu_tx_abort(tx);	// abort DMU tx
173 *		ZFS_EXIT(zfsvfs);	// finished in zfs
174 *		return (error);		// really out of space
175 *	}
176 *	error = do_real_work();		// do whatever this VOP does
177 *	if (error == 0)
178 *		zfs_log_*(...);		// on success, make ZIL entry
179 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
180 *	rw_exit(...);			// drop locks
181 *	zfs_dirent_unlock(dl);		// unlock directory entry
182 *	VN_RELE(...);			// release held vnodes
183 *	zil_commit(zilog, foid);	// synchronous when necessary
184 *	ZFS_EXIT(zfsvfs);		// finished in zfs
185 *	return (error);			// done, report error
186 */
187
188/* ARGSUSED */
189static int
190zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
191{
192	znode_t	*zp = VTOZ(*vpp);
193	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
194
195	ZFS_ENTER(zfsvfs);
196	ZFS_VERIFY_ZP(zp);
197
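	/*
	 * Honor the ZFS_APPENDONLY attribute: a file marked append-only
	 * may only be opened for writing with FAPPEND set.
	 */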
198	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
199	    ((flag & FAPPEND) == 0)) {
200		ZFS_EXIT(zfsvfs);
201		return (SET_ERROR(EPERM));
202	}
203
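	/*
	 * Virus scan eligible files on open: regular, non-quarantined,
	 * non-empty files on a file system with virus scanning enabled.
	 */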
204	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
205	    ZTOV(zp)->v_type == VREG &&
206	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
207		if (fs_vscan(*vpp, cr, 0) != 0) {
208			ZFS_EXIT(zfsvfs);
209			return (SET_ERROR(EACCES));
210		}
211	}
212
213	/* Keep a count of the synchronous opens in the znode */
214	if (flag & (FSYNC | FDSYNC))
215		atomic_inc_32(&zp->z_sync_cnt);
216
217	ZFS_EXIT(zfsvfs);
218	return (0);
219}
220
221/* ARGSUSED */
222static int
223zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
224    caller_context_t *ct)
225{
226	znode_t	*zp = VTOZ(vp);
227	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
228
229	/*
230	 * Clean up any locks held by this process on the vp.
231	 */
232	cleanlocks(vp, ddi_get_pid(), 0);
233	cleanshares(vp, ddi_get_pid());
234
235	ZFS_ENTER(zfsvfs);
236	ZFS_VERIFY_ZP(zp);
237
238	/* Decrement the synchronous opens in the znode */
239	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
240		atomic_dec_32(&zp->z_sync_cnt);
241
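	/* Virus scan eligible files on close. */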
242	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
243	    ZTOV(zp)->v_type == VREG &&
244	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
245		VERIFY(fs_vscan(vp, cr, 1) == 0);
246
247	ZFS_EXIT(zfsvfs);
248	return (0);
249}
250
251/*
252 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
253 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
254 */
255static int
256zfs_holey(vnode_t *vp, int cmd, offset_t *off)
257{
258	znode_t	*zp = VTOZ(vp);
259	uint64_t noff = (uint64_t)*off; /* new offset */
260	uint64_t file_sz;
261	int error;
262	boolean_t hole;
263
264	file_sz = zp->z_size;
265	if (noff >= file_sz)  {
266		return (SET_ERROR(ENXIO));
267	}
268
269	if (cmd == _FIO_SEEK_HOLE)
270		hole = B_TRUE;
271	else
272		hole = B_FALSE;
273
274	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
275
276	if (error == ESRCH)
277		return (SET_ERROR(ENXIO));
278
279	/*
280	 * We could find a hole that begins after the logical end-of-file,
281	 * because dmu_offset_next() only works on whole blocks.  If the
282	 * EOF falls mid-block, then indicate that the "virtual hole"
283	 * at the end of the file begins at the logical EOF, rather than
284	 * at the end of the last block.
285	 */
286	if (noff > file_sz) {
287		ASSERT(hole);
288		noff = file_sz;
289	}
290
291	if (noff < *off)
292		return (error);
293	*off = noff;
294	return (error);
295}
296
297/* ARGSUSED */
298static int
299zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
300    int *rvalp, caller_context_t *ct)
301{
302	offset_t off;
303	offset_t ndata;
304	dmu_object_info_t doi;
305	int error;
306	zfsvfs_t *zfsvfs;
307	znode_t *zp;
308
309	switch (com) {
	case _FIOFFS:
	{
		return (zfs_sync(vp->v_vfsp, 0, cred));
	}

	/*
	 * The following two ioctls are used by bfu.  Fake them out to
	 * avoid bfu errors.
	 */
	case _FIOGDIO:
	case _FIOSDIO:
	{
		return (0);
	}
324
325	case _FIO_SEEK_DATA:
326	case _FIO_SEEK_HOLE:
327	{
328		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
329			return (SET_ERROR(EFAULT));
330
331		zp = VTOZ(vp);
332		zfsvfs = zp->z_zfsvfs;
333		ZFS_ENTER(zfsvfs);
334		ZFS_VERIFY_ZP(zp);
335
336		/* offset parameter is in/out */
337		error = zfs_holey(vp, com, &off);
338		ZFS_EXIT(zfsvfs);
339		if (error)
340			return (error);
341		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
342			return (SET_ERROR(EFAULT));
343		return (0);
344	}
345	case _FIO_COUNT_FILLED:
346	{
347		/*
348		 * _FIO_COUNT_FILLED adds a new ioctl command which
349		 * exposes the number of filled blocks in a
350		 * ZFS object.
351		 */
352		zp = VTOZ(vp);
353		zfsvfs = zp->z_zfsvfs;
354		ZFS_ENTER(zfsvfs);
355		ZFS_VERIFY_ZP(zp);
356
357		/*
358		 * Wait for all dirty blocks for this object
359		 * to get synced out to disk, and the DMU info
360		 * updated.
361		 */
362		error = dmu_object_wait_synced(zfsvfs->z_os, zp->z_id);
363		if (error) {
364			ZFS_EXIT(zfsvfs);
365			return (error);
366		}
367
368		/*
369		 * Retrieve fill count from DMU object.
370		 */
371		error = dmu_object_info(zfsvfs->z_os, zp->z_id, &doi);
372		if (error) {
373			ZFS_EXIT(zfsvfs);
374			return (error);
375		}
376
377		ndata = doi.doi_fill_count;
378
379		ZFS_EXIT(zfsvfs);
380		if (ddi_copyout(&ndata, (void *)data, sizeof (ndata), flag))
381			return (SET_ERROR(EFAULT));
382		return (0);
383	}
384	}
385	return (SET_ERROR(ENOTTY));
386}
387
388/*
389 * Utility functions to map and unmap a single physical page.  These
390 * are used to manage the mappable copies of ZFS file data, and therefore
391 * do not update ref/mod bits.
392 */
393caddr_t
394zfs_map_page(page_t *pp, enum seg_rw rw)
395{
396	if (kpm_enable)
397		return (hat_kpm_mapin(pp, 0));
398	ASSERT(rw == S_READ || rw == S_WRITE);
399	return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0),
400	    (caddr_t)-1));
401}
402
403void
404zfs_unmap_page(page_t *pp, caddr_t addr)
405{
406	if (kpm_enable) {
407		hat_kpm_mapout(pp, 0, addr);
408	} else {
409		ppmapout(addr);
410	}
411}
412
413/*
414 * When a file is memory mapped, we must keep the IO data synchronized
415 * between the DMU cache and the memory mapped pages.  What this means:
416 *
417 * On Write:	If we find a memory mapped page, we write to *both*
418 *		the page and the dmu buffer.
419 */
420static void
421update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
422{
423	int64_t	off;
424
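	/*
	 * Walk the range a page at a time; for each page resident in the
	 * page cache, copy the newly written data back out of the DMU so
	 * that mmap'ed readers see it.
	 */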
425	off = start & PAGEOFFSET;
426	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
427		page_t *pp;
428		uint64_t nbytes = MIN(PAGESIZE - off, len);
429
430		if (pp = page_lookup(vp, start, SE_SHARED)) {
431			caddr_t va;
432
433			va = zfs_map_page(pp, S_WRITE);
434			(void) dmu_read(os, oid, start+off, nbytes, va+off,
435			    DMU_READ_PREFETCH);
436			zfs_unmap_page(pp, va);
437			page_unlock(pp);
438		}
439		len -= nbytes;
440		off = 0;
441	}
442}
443
444/*
445 * When a file is memory mapped, we must keep the IO data synchronized
446 * between the DMU cache and the memory mapped pages.  What this means:
447 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we fall back to the dmu buffer.
450 *
451 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
452 *	 the file is memory mapped.
453 */
454static int
455mappedread(vnode_t *vp, int nbytes, uio_t *uio)
456{
457	znode_t *zp = VTOZ(vp);
458	int64_t	start, off;
459	int len = nbytes;
460	int error = 0;
461
462	start = uio->uio_loffset;
463	off = start & PAGEOFFSET;
464	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
465		page_t *pp;
466		uint64_t bytes = MIN(PAGESIZE - off, len);
467
468		if (pp = page_lookup(vp, start, SE_SHARED)) {
469			caddr_t va;
470
471			va = zfs_map_page(pp, S_READ);
472			error = uiomove(va + off, bytes, UIO_READ, uio);
473			zfs_unmap_page(pp, va);
474			page_unlock(pp);
475		} else {
476			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
477			    uio, bytes);
478		}
479		len -= bytes;
480		off = 0;
481		if (error)
482			break;
483	}
484	return (error);
485}
486
487offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
488
489/*
490 * Read bytes from specified file into supplied buffer.
491 *
492 *	IN:	vp	- vnode of file to be read from.
493 *		uio	- structure supplying read location, range info,
494 *			  and return buffer.
495 *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
496 *		cr	- credentials of caller.
497 *		ct	- caller context
498 *
499 *	OUT:	uio	- updated offset and range, buffer filled.
500 *
501 *	RETURN:	0 on success, error code on failure.
502 *
503 * Side Effects:
504 *	vp - atime updated if byte count > 0
505 */
506/* ARGSUSED */
507static int
508zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
509{
510	znode_t		*zp = VTOZ(vp);
511	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
512	ssize_t		n, nbytes;
513	int		error = 0;
514	rl_t		*rl;
515	xuio_t		*xuio = NULL;
516
517	ZFS_ENTER(zfsvfs);
518	ZFS_VERIFY_ZP(zp);
519
520	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
521		ZFS_EXIT(zfsvfs);
522		return (SET_ERROR(EACCES));
523	}
524
525	/*
526	 * Validate file offset
527	 */
528	if (uio->uio_loffset < (offset_t)0) {
529		ZFS_EXIT(zfsvfs);
530		return (SET_ERROR(EINVAL));
531	}
532
533	/*
534	 * Fasttrack empty reads
535	 */
536	if (uio->uio_resid == 0) {
537		ZFS_EXIT(zfsvfs);
538		return (0);
539	}
540
541	/*
542	 * Check for mandatory locks
543	 */
544	if (MANDMODE(zp->z_mode)) {
545		if (error = chklock(vp, FREAD,
546		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
547			ZFS_EXIT(zfsvfs);
548			return (error);
549		}
550	}
551
552	/*
553	 * If we're in FRSYNC mode, sync out this znode before reading it.
554	 */
555	if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
556		zil_commit(zfsvfs->z_log, zp->z_id);
557
558	/*
559	 * Lock the range against changes.
560	 */
561	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
562
563	/*
564	 * If we are reading past end-of-file we can skip
565	 * to the end; but we might still need to set atime.
566	 */
567	if (uio->uio_loffset >= zp->z_size) {
568		error = 0;
569		goto out;
570	}
571
572	ASSERT(uio->uio_loffset < zp->z_size);
573	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
574
575	if ((uio->uio_extflg == UIO_XUIO) &&
576	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
577		int nblk;
578		int blksz = zp->z_blksz;
579		uint64_t offset = uio->uio_loffset;
580
581		xuio = (xuio_t *)uio;
582		if ((ISP2(blksz))) {
583			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
584			    blksz)) / blksz;
585		} else {
586			ASSERT(offset + n <= blksz);
587			nblk = 1;
588		}
589		(void) dmu_xuio_init(xuio, nblk);
590
591		if (vn_has_cached_data(vp)) {
592			/*
593			 * For simplicity, we always allocate a full buffer
594			 * even if we only expect to read a portion of a block.
595			 */
596			while (--nblk >= 0) {
597				(void) dmu_xuio_add(xuio,
598				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
599				    blksz), 0, blksz);
600			}
601		}
602	}
603
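	/*
	 * Read in chunks of at most zfs_read_chunk_size bytes.  If the file
	 * has cached (mmap'ed) pages, read through the page cache via
	 * mappedread(); otherwise read directly from the DMU.
	 */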
604	while (n > 0) {
605		nbytes = MIN(n, zfs_read_chunk_size -
606		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
607
608		if (vn_has_cached_data(vp)) {
609			error = mappedread(vp, nbytes, uio);
610		} else {
611			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
612			    uio, nbytes);
613		}
614		if (error) {
615			/* convert checksum errors into IO errors */
616			if (error == ECKSUM)
617				error = SET_ERROR(EIO);
618			break;
619		}
620
621		n -= nbytes;
622	}
623out:
624	zfs_range_unlock(rl);
625
626	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
627	ZFS_EXIT(zfsvfs);
628	return (error);
629}
630
631/*
632 * Write the bytes to a file.
633 *
634 *	IN:	vp	- vnode of file to be written to.
635 *		uio	- structure supplying write location, range info,
636 *			  and data buffer.
637 *		ioflag	- FAPPEND, FSYNC, and/or FDSYNC.  FAPPEND is
638 *			  set if in append mode.
639 *		cr	- credentials of caller.
640 *		ct	- caller context (NFS/CIFS fem monitor only)
641 *
642 *	OUT:	uio	- updated offset and range.
643 *
644 *	RETURN:	0 on success, error code on failure.
645 *
646 * Timestamps:
647 *	vp - ctime|mtime updated if byte count > 0
648 */
649
650/* ARGSUSED */
651static int
652zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
653{
654	znode_t		*zp = VTOZ(vp);
655	rlim64_t	limit = uio->uio_llimit;
656	ssize_t		start_resid = uio->uio_resid;
657	ssize_t		tx_bytes;
658	uint64_t	end_size;
659	dmu_tx_t	*tx;
660	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
661	zilog_t		*zilog;
662	offset_t	woff;
663	ssize_t		n, nbytes;
664	rl_t		*rl;
665	int		max_blksz = zfsvfs->z_max_blksz;
666	int		error = 0;
667	arc_buf_t	*abuf;
668	iovec_t		*aiov = NULL;
669	xuio_t		*xuio = NULL;
670	int		i_iov = 0;
671	int		iovcnt = uio->uio_iovcnt;
672	iovec_t		*iovp = uio->uio_iov;
673	int		write_eof;
674	int		count = 0;
675	sa_bulk_attr_t	bulk[4];
676	uint64_t	mtime[2], ctime[2];
677
678	/*
679	 * Fasttrack empty write
680	 */
681	n = start_resid;
682	if (n == 0)
683		return (0);
684
685	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
686		limit = MAXOFFSET_T;
687
688	ZFS_ENTER(zfsvfs);
689	ZFS_VERIFY_ZP(zp);
690
691	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
692	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
693	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
694	    &zp->z_size, 8);
695	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
696	    &zp->z_pflags, 8);
697
698	/*
	 * In cases where vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots),
	 * our callers might not be able to detect properly that we are
	 * read-only, so check it explicitly here.
702	 */
703	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
704		ZFS_EXIT(zfsvfs);
705		return (SET_ERROR(EROFS));
706	}
707
708	/*
709	 * If immutable or not appending then return EPERM
710	 */
711	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
712	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
713	    (uio->uio_loffset < zp->z_size))) {
714		ZFS_EXIT(zfsvfs);
715		return (SET_ERROR(EPERM));
716	}
717
718	zilog = zfsvfs->z_log;
719
720	/*
721	 * Validate file offset
722	 */
723	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
724	if (woff < 0) {
725		ZFS_EXIT(zfsvfs);
726		return (SET_ERROR(EINVAL));
727	}
728
729	/*
730	 * Check for mandatory locks before calling zfs_range_lock()
731	 * in order to prevent a deadlock with locks set via fcntl().
732	 */
733	if (MANDMODE((mode_t)zp->z_mode) &&
734	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
735		ZFS_EXIT(zfsvfs);
736		return (error);
737	}
738
739	/*
	 * Pre-fault the pages to ensure slow (e.g. NFS) pages
	 * don't hold up the txg.
742	 * Skip this if uio contains loaned arc_buf.
743	 */
744	if ((uio->uio_extflg == UIO_XUIO) &&
745	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
746		xuio = (xuio_t *)uio;
747	else
748		uio_prefaultpages(MIN(n, max_blksz), uio);
749
750	/*
751	 * If in append mode, set the io offset pointer to eof.
752	 */
753	if (ioflag & FAPPEND) {
754		/*
755		 * Obtain an appending range lock to guarantee file append
756		 * semantics.  We reset the write offset once we have the lock.
757		 */
758		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
759		woff = rl->r_off;
760		if (rl->r_len == UINT64_MAX) {
761			/*
762			 * We overlocked the file because this write will cause
763			 * the file block size to increase.
764			 * Note that zp_size cannot change with this lock held.
765			 */
766			woff = zp->z_size;
767		}
768		uio->uio_loffset = woff;
769	} else {
770		/*
771		 * Note that if the file block size will change as a result of
772		 * this write, then this range lock will lock the entire file
773		 * so that we can re-write the block safely.
774		 */
775		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
776	}
777
778	if (woff >= limit) {
779		zfs_range_unlock(rl);
780		ZFS_EXIT(zfsvfs);
781		return (SET_ERROR(EFBIG));
782	}
783
784	if ((woff + n) > limit || woff > (limit - n))
785		n = limit - woff;
786
787	/* Will this write extend the file length? */
788	write_eof = (woff + n > zp->z_size);
789
790	end_size = MAX(zp->z_size, woff + n);
791
792	/*
793	 * Write the file in reasonable size chunks.  Each chunk is written
794	 * in a separate transaction; this keeps the intent log records small
795	 * and allows us to do more fine-grained space accounting.
796	 */
797	while (n > 0) {
798		abuf = NULL;
799		woff = uio->uio_loffset;
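		/*
		 * Bail out early if the file's user or group owner is
		 * over quota.
		 */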
800		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
801		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
802			if (abuf != NULL)
803				dmu_return_arcbuf(abuf);
804			error = SET_ERROR(EDQUOT);
805			break;
806		}
807
808		if (xuio && abuf == NULL) {
809			ASSERT(i_iov < iovcnt);
810			aiov = &iovp[i_iov];
811			abuf = dmu_xuio_arcbuf(xuio, i_iov);
812			dmu_xuio_clear(xuio, i_iov);
813			DTRACE_PROBE3(zfs_cp_write, int, i_iov,
814			    iovec_t *, aiov, arc_buf_t *, abuf);
815			ASSERT((aiov->iov_base == abuf->b_data) ||
816			    ((char *)aiov->iov_base - (char *)abuf->b_data +
817			    aiov->iov_len == arc_buf_size(abuf)));
818			i_iov++;
819		} else if (abuf == NULL && n >= max_blksz &&
820		    woff >= zp->z_size &&
821		    P2PHASE(woff, max_blksz) == 0 &&
822		    zp->z_blksz == max_blksz) {
823			/*
824			 * This write covers a full block.  "Borrow" a buffer
825			 * from the dmu so that we can fill it before we enter
826			 * a transaction.  This avoids the possibility of
827			 * holding up the transaction if the data copy hangs
828			 * up on a pagefault (e.g., from an NFS server mapping).
829			 */
830			size_t cbytes;
831
832			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
833			    max_blksz);
834			ASSERT(abuf != NULL);
835			ASSERT(arc_buf_size(abuf) == max_blksz);
836			if (error = uiocopy(abuf->b_data, max_blksz,
837			    UIO_WRITE, uio, &cbytes)) {
838				dmu_return_arcbuf(abuf);
839				break;
840			}
841			ASSERT(cbytes == max_blksz);
842		}
843
844		/*
845		 * Start a transaction.
846		 */
847		tx = dmu_tx_create(zfsvfs->z_os);
848		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
849		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
850		zfs_sa_upgrade_txholds(tx, zp);
851		error = dmu_tx_assign(tx, TXG_WAIT);
852		if (error) {
853			dmu_tx_abort(tx);
854			if (abuf != NULL)
855				dmu_return_arcbuf(abuf);
856			break;
857		}
858
859		/*
860		 * If zfs_range_lock() over-locked we grow the blocksize
861		 * and then reduce the lock range.  This will only happen
862		 * on the first iteration since zfs_range_reduce() will
863		 * shrink down r_len to the appropriate size.
864		 */
865		if (rl->r_len == UINT64_MAX) {
866			uint64_t new_blksz;
867
868			if (zp->z_blksz > max_blksz) {
869				/*
870				 * File's blocksize is already larger than the
871				 * "recordsize" property.  Only let it grow to
872				 * the next power of 2.
873				 */
874				ASSERT(!ISP2(zp->z_blksz));
875				new_blksz = MIN(end_size,
876				    1 << highbit64(zp->z_blksz));
877			} else {
878				new_blksz = MIN(end_size, max_blksz);
879			}
880			zfs_grow_blocksize(zp, new_blksz, tx);
881			zfs_range_reduce(rl, woff, n);
882		}
883
884		/*
885		 * XXX - should we really limit each write to z_max_blksz?
886		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
887		 */
888		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
889
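		/*
		 * Either copy the data from the uio into the dbuf, or hand
		 * the pre-filled ARC buffer directly to the DMU.
		 */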
890		if (abuf == NULL) {
891			tx_bytes = uio->uio_resid;
892			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
893			    uio, nbytes, tx);
894			tx_bytes -= uio->uio_resid;
895		} else {
896			tx_bytes = nbytes;
897			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
898			/*
899			 * If this is not a full block write, but we are
900			 * extending the file past EOF and this data starts
901			 * block-aligned, use assign_arcbuf().  Otherwise,
902			 * write via dmu_write().
903			 */
904			if (tx_bytes < max_blksz && (!write_eof ||
905			    aiov->iov_base != abuf->b_data)) {
906				ASSERT(xuio);
907				dmu_write(zfsvfs->z_os, zp->z_id, woff,
908				    aiov->iov_len, aiov->iov_base, tx);
909				dmu_return_arcbuf(abuf);
910				xuio_stat_wbuf_copied();
911			} else {
912				ASSERT(xuio || tx_bytes == max_blksz);
913				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
914				    woff, abuf, tx);
915			}
916			ASSERT(tx_bytes <= uio->uio_resid);
917			uioskip(uio, tx_bytes);
918		}
919		if (tx_bytes && vn_has_cached_data(vp)) {
920			update_pages(vp, woff,
921			    tx_bytes, zfsvfs->z_os, zp->z_id);
922		}
923
924		/*
925		 * If we made no progress, we're done.  If we made even
926		 * partial progress, update the znode and ZIL accordingly.
927		 */
928		if (tx_bytes == 0) {
929			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
930			    (void *)&zp->z_size, sizeof (uint64_t), tx);
931			dmu_tx_commit(tx);
932			ASSERT(error != 0);
933			break;
934		}
935
936		/*
937		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
941		 * been done, but that would still expose the ISUID/ISGID
942		 * to another app after the partial write is committed.
943		 *
944		 * Note: we don't call zfs_fuid_map_id() here because
945		 * user 0 is not an ephemeral uid.
946		 */
947		mutex_enter(&zp->z_acl_lock);
948		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
949		    (S_IXUSR >> 6))) != 0 &&
950		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
951		    secpolicy_vnode_setid_retain(cr,
952		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
953			uint64_t newmode;
954			zp->z_mode &= ~(S_ISUID | S_ISGID);
955			newmode = zp->z_mode;
956			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
957			    (void *)&newmode, sizeof (uint64_t), tx);
958		}
959		mutex_exit(&zp->z_acl_lock);
960
961		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
962		    B_TRUE);
963
964		/*
965		 * Update the file size (zp_size) if it has changed;
966		 * account for possible concurrent updates.
967		 */
968		while ((end_size = zp->z_size) < uio->uio_loffset) {
969			(void) atomic_cas_64(&zp->z_size, end_size,
970			    uio->uio_loffset);
971			ASSERT(error == 0);
972		}
973		/*
		 * If we are replaying and eof is non-zero then force
975		 * the file size to the specified eof. Note, there's no
976		 * concurrency during replay.
977		 */
978		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
979			zp->z_size = zfsvfs->z_replay_eof;
980
981		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
982
983		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
984		dmu_tx_commit(tx);
985
986		if (error != 0)
987			break;
988		ASSERT(tx_bytes == nbytes);
989		n -= nbytes;
990
991		if (!xuio && n > 0)
992			uio_prefaultpages(MIN(n, max_blksz), uio);
993	}
994
995	zfs_range_unlock(rl);
996
997	/*
998	 * If we're in replay mode, or we made no progress, return error.
999	 * Otherwise, it's at least a partial write, so it's successful.
1000	 */
1001	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
1002		ZFS_EXIT(zfsvfs);
1003		return (error);
1004	}
1005
1006	if (ioflag & (FSYNC | FDSYNC) ||
1007	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1008		zil_commit(zilog, zp->z_id);
1009
1010	ZFS_EXIT(zfsvfs);
1011	return (0);
1012}
1013
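/*
 * Release the resources (dbuf, range lock, vnode hold) acquired by
 * zfs_get_data() once the corresponding log write has completed.
 */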
1014void
1015zfs_get_done(zgd_t *zgd, int error)
1016{
1017	znode_t *zp = zgd->zgd_private;
1018	objset_t *os = zp->z_zfsvfs->z_os;
1019
1020	if (zgd->zgd_db)
1021		dmu_buf_rele(zgd->zgd_db, zgd);
1022
1023	zfs_range_unlock(zgd->zgd_rl);
1024
1025	/*
1026	 * Release the vnode asynchronously as we currently have the
1027	 * txg stopped from syncing.
1028	 */
1029	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1030
1031	if (error == 0 && zgd->zgd_bp)
1032		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1033
1034	kmem_free(zgd, sizeof (zgd_t));
1035}
1036
1037#ifdef DEBUG
1038static int zil_fault_io = 0;
1039#endif
1040
1041/*
1042 * Get data to generate a TX_WRITE intent log record.
1043 */
1044int
1045zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1046{
1047	zfsvfs_t *zfsvfs = arg;
1048	objset_t *os = zfsvfs->z_os;
1049	znode_t *zp;
1050	uint64_t object = lr->lr_foid;
1051	uint64_t offset = lr->lr_offset;
1052	uint64_t size = lr->lr_length;
1053	blkptr_t *bp = &lr->lr_blkptr;
1054	dmu_buf_t *db;
1055	zgd_t *zgd;
1056	int error = 0;
1057
1058	ASSERT(zio != NULL);
1059	ASSERT(size != 0);
1060
1061	/*
1062	 * Nothing to do if the file has been removed
1063	 */
1064	if (zfs_zget(zfsvfs, object, &zp) != 0)
1065		return (SET_ERROR(ENOENT));
1066	if (zp->z_unlinked) {
1067		/*
1068		 * Release the vnode asynchronously as we currently have the
1069		 * txg stopped from syncing.
1070		 */
1071		VN_RELE_ASYNC(ZTOV(zp),
1072		    dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1073		return (SET_ERROR(ENOENT));
1074	}
1075
1076	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1077	zgd->zgd_zilog = zfsvfs->z_log;
1078	zgd->zgd_private = zp;
1079
1080	/*
1081	 * Write records come in two flavors: immediate and indirect.
1082	 * For small writes it's cheaper to store the data with the
1083	 * log record (immediate); for large writes it's cheaper to
1084	 * sync the data and get a pointer to it (indirect) so that
1085	 * we don't have to write the data twice.
1086	 */
1087	if (buf != NULL) { /* immediate write */
1088		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
1089		/* test for truncation needs to be done while range locked */
1090		if (offset >= zp->z_size) {
1091			error = SET_ERROR(ENOENT);
1092		} else {
1093			error = dmu_read(os, object, offset, size, buf,
1094			    DMU_READ_NO_PREFETCH);
1095		}
1096		ASSERT(error == 0 || error == ENOENT);
1097	} else { /* indirect write */
1098		/*
1099		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
1101		 * that no one can change the data. We need to re-check
1102		 * blocksize after we get the lock in case it's changed!
1103		 */
1104		for (;;) {
1105			uint64_t blkoff;
1106			size = zp->z_blksz;
1107			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1108			offset -= blkoff;
1109			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
1110			    RL_READER);
1111			if (zp->z_blksz == size)
1112				break;
1113			offset += blkoff;
1114			zfs_range_unlock(zgd->zgd_rl);
1115		}
1116		/* test for truncation needs to be done while range locked */
1117		if (lr->lr_offset >= zp->z_size)
1118			error = SET_ERROR(ENOENT);
1119#ifdef DEBUG
1120		if (zil_fault_io) {
1121			error = SET_ERROR(EIO);
1122			zil_fault_io = 0;
1123		}
1124#endif
1125		if (error == 0)
1126			error = dmu_buf_hold(os, object, offset, zgd, &db,
1127			    DMU_READ_NO_PREFETCH);
1128
1129		if (error == 0) {
1130			blkptr_t *obp = dmu_buf_get_blkptr(db);
1131			if (obp) {
1132				ASSERT(BP_IS_HOLE(bp));
1133				*bp = *obp;
1134			}
1135
1136			zgd->zgd_db = db;
1137			zgd->zgd_bp = bp;
1138
1139			ASSERT(db->db_offset == offset);
1140			ASSERT(db->db_size == size);
1141
1142			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1143			    zfs_get_done, zgd);
1144			ASSERT(error || lr->lr_length <= zp->z_blksz);
1145
1146			/*
1147			 * On success, we need to wait for the write I/O
1148			 * initiated by dmu_sync() to complete before we can
1149			 * release this dbuf.  We will finish everything up
1150			 * in the zfs_get_done() callback.
1151			 */
1152			if (error == 0)
1153				return (0);
1154
1155			if (error == EALREADY) {
1156				lr->lr_common.lrc_txtype = TX_WRITE2;
1157				error = 0;
1158			}
1159		}
1160	}
1161
1162	zfs_get_done(zgd, error);
1163
1164	return (error);
1165}
1166
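/*
 * Check access permissions on a znode using either an ACE mask
 * (when V_ACE_MASK is set in flag) or traditional rwx mode bits.
 */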
1167/*ARGSUSED*/
1168static int
1169zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
1170    caller_context_t *ct)
1171{
1172	znode_t *zp = VTOZ(vp);
1173	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1174	int error;
1175
1176	ZFS_ENTER(zfsvfs);
1177	ZFS_VERIFY_ZP(zp);
1178
1179	if (flag & V_ACE_MASK)
1180		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1181	else
1182		error = zfs_zaccess_rwx(zp, mode, flag, cr);
1183
1184	ZFS_EXIT(zfsvfs);
1185	return (error);
1186}
1187
1188/*
1189 * If vnode is for a device return a specfs vnode instead.
1190 */
1191static int
1192specvp_check(vnode_t **vpp, cred_t *cr)
1193{
1194	int error = 0;
1195
1196	if (IS_DEVVP(*vpp)) {
1197		struct vnode *svp;
1198
1199		svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1200		VN_RELE(*vpp);
1201		if (svp == NULL)
1202			error = SET_ERROR(ENOSYS);
1203		*vpp = svp;
1204	}
1205	return (error);
1206}
1207
1208
1209/*
1210 * Lookup an entry in a directory, or an extended attribute directory.
1211 * If it exists, return a held vnode reference for it.
1212 *
1213 *	IN:	dvp	- vnode of directory to search.
1214 *		nm	- name of entry to lookup.
1215 *		pnp	- full pathname to lookup [UNUSED].
1216 *		flags	- LOOKUP_XATTR set if looking for an attribute.
1217 *		rdir	- root directory vnode [UNUSED].
1218 *		cr	- credentials of caller.
1219 *		ct	- caller context
1220 *		direntflags - directory lookup flags
1221 *		realpnp - returned pathname.
1222 *
1223 *	OUT:	vpp	- vnode of located entry, NULL if not found.
1224 *
1225 *	RETURN:	0 on success, error code on failure.
1226 *
1227 * Timestamps:
1228 *	NA
1229 */
1230/* ARGSUSED */
1231static int
1232zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1233    int flags, vnode_t *rdir, cred_t *cr,  caller_context_t *ct,
1234    int *direntflags, pathname_t *realpnp)
1235{
1236	znode_t *zdp = VTOZ(dvp);
1237	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1238	int	error = 0;
1239
	/*
	 * Fast path: for ordinary lookups (no LOOKUP_XATTR or FIGNORECASE)
	 * try a cheap execute-access check and the DNLC before taking
	 * ZFS_ENTER().
	 */
1241	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1242
1243		if (dvp->v_type != VDIR) {
1244			return (SET_ERROR(ENOTDIR));
1245		} else if (zdp->z_sa_hdl == NULL) {
1246			return (SET_ERROR(EIO));
1247		}
1248
1249		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1250			error = zfs_fastaccesschk_execute(zdp, cr);
1251			if (!error) {
1252				*vpp = dvp;
1253				VN_HOLD(*vpp);
1254				return (0);
1255			}
1256			return (error);
1257		} else {
1258			vnode_t *tvp = dnlc_lookup(dvp, nm);
1259
1260			if (tvp) {
1261				error = zfs_fastaccesschk_execute(zdp, cr);
1262				if (error) {
1263					VN_RELE(tvp);
1264					return (error);
1265				}
1266				if (tvp == DNLC_NO_VNODE) {
1267					VN_RELE(tvp);
1268					return (SET_ERROR(ENOENT));
1269				} else {
1270					*vpp = tvp;
1271					return (specvp_check(vpp, cr));
1272				}
1273			}
1274		}
1275	}
1276
1277	DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
1278
1279	ZFS_ENTER(zfsvfs);
1280	ZFS_VERIFY_ZP(zdp);
1281
1282	*vpp = NULL;
1283
1284	if (flags & LOOKUP_XATTR) {
1285		/*
1286		 * If the xattr property is off, refuse the lookup request.
1287		 */
1288		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
1289			ZFS_EXIT(zfsvfs);
1290			return (SET_ERROR(EINVAL));
1291		}
1292
1293		/*
		 * We don't allow recursive attributes.
1295		 * Maybe someday we will.
1296		 */
1297		if (zdp->z_pflags & ZFS_XATTR) {
1298			ZFS_EXIT(zfsvfs);
1299			return (SET_ERROR(EINVAL));
1300		}
1301
1302		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
1303			ZFS_EXIT(zfsvfs);
1304			return (error);
1305		}
1306
1307		/*
		 * Do we have permission to get into the attribute directory?
1309		 */
1310
1311		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
1312		    B_FALSE, cr)) {
1313			VN_RELE(*vpp);
1314			*vpp = NULL;
1315		}
1316
1317		ZFS_EXIT(zfsvfs);
1318		return (error);
1319	}
1320
1321	if (dvp->v_type != VDIR) {
1322		ZFS_EXIT(zfsvfs);
1323		return (SET_ERROR(ENOTDIR));
1324	}
1325
1326	/*
1327	 * Check accessibility of directory.
1328	 */
1329
1330	if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
1331		ZFS_EXIT(zfsvfs);
1332		return (error);
1333	}
1334
1335	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1336	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1337		ZFS_EXIT(zfsvfs);
1338		return (SET_ERROR(EILSEQ));
1339	}
1340
1341	error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
1342	if (error == 0)
1343		error = specvp_check(vpp, cr);
1344
1345	ZFS_EXIT(zfsvfs);
1346	return (error);
1347}
1348
1349/*
1350 * Attempt to create a new entry in a directory.  If the entry
1351 * already exists, truncate the file if permissible, else return
1352 * an error.  Return the vp of the created or trunc'd file.
1353 *
1354 *	IN:	dvp	- vnode of directory to put new file entry in.
1355 *		name	- name of new file entry.
1356 *		vap	- attributes of new file.
1357 *		excl	- flag indicating exclusive or non-exclusive mode.
1358 *		mode	- mode to open file with.
1359 *		cr	- credentials of caller.
1360 *		flag	- large file flag [UNUSED].
1361 *		ct	- caller context
1362 *		vsecp	- ACL to be set
1363 *
1364 *	OUT:	vpp	- vnode of created or trunc'd entry.
1365 *
1366 *	RETURN:	0 on success, error code on failure.
1367 *
1368 * Timestamps:
1369 *	dvp - ctime|mtime updated if new entry created
1370 *	 vp - ctime|mtime always, atime if new
1371 */
1372
1373/* ARGSUSED */
1374static int
1375zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl,
1376    int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
1377    vsecattr_t *vsecp)
1378{
1379	znode_t		*zp, *dzp = VTOZ(dvp);
1380	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1381	zilog_t		*zilog;
1382	objset_t	*os;
1383	zfs_dirlock_t	*dl;
1384	dmu_tx_t	*tx;
1385	int		error;
1386	ksid_t		*ksid;
1387	uid_t		uid;
1388	gid_t		gid = crgetgid(cr);
1389	zfs_acl_ids_t   acl_ids;
1390	boolean_t	fuid_dirtied;
1391	boolean_t	have_acl = B_FALSE;
1392	boolean_t	waited = B_FALSE;
1393
1394	/*
1395	 * If we have an ephemeral id, ACL, or XVATTR then
1396	 * make sure file system is at proper version
1397	 */
1398
1399	ksid = crgetsid(cr, KSID_OWNER);
1400	if (ksid)
1401		uid = ksid_getid(ksid);
1402	else
1403		uid = crgetuid(cr);
1404
1405	if (zfsvfs->z_use_fuids == B_FALSE &&
1406	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1407	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1408		return (SET_ERROR(EINVAL));
1409
1410	ZFS_ENTER(zfsvfs);
1411	ZFS_VERIFY_ZP(dzp);
1412	os = zfsvfs->z_os;
1413	zilog = zfsvfs->z_log;
1414
1415	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1416	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1417		ZFS_EXIT(zfsvfs);
1418		return (SET_ERROR(EILSEQ));
1419	}
1420
1421	if (vap->va_mask & AT_XVATTR) {
1422		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1423		    crgetuid(cr), cr, vap->va_type)) != 0) {
1424			ZFS_EXIT(zfsvfs);
1425			return (error);
1426		}
1427	}
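	/*
	 * Restart point: if dmu_tx_assign() returns ERESTART we drop our
	 * locks, wait for the transaction group, and retry from here.
	 */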
1428top:
1429	*vpp = NULL;
1430
1431	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr))
1432		vap->va_mode &= ~VSVTX;
1433
1434	if (*name == '\0') {
1435		/*
1436		 * Null component name refers to the directory itself.
1437		 */
1438		VN_HOLD(dvp);
1439		zp = dzp;
1440		dl = NULL;
1441		error = 0;
1442	} else {
1443		/* possible VN_HOLD(zp) */
1444		int zflg = 0;
1445
1446		if (flag & FIGNORECASE)
1447			zflg |= ZCILOOK;
1448
1449		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1450		    NULL, NULL);
1451		if (error) {
1452			if (have_acl)
1453				zfs_acl_ids_free(&acl_ids);
1454			if (strcmp(name, "..") == 0)
1455				error = SET_ERROR(EISDIR);
1456			ZFS_EXIT(zfsvfs);
1457			return (error);
1458		}
1459	}
1460
1461	if (zp == NULL) {
1462		uint64_t txtype;
1463
1464		/*
1465		 * Create a new file object and update the directory
1466		 * to reference it.
1467		 */
1468		if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
1469			if (have_acl)
1470				zfs_acl_ids_free(&acl_ids);
1471			goto out;
1472		}
1473
1474		/*
1475		 * We only support the creation of regular files in
1476		 * extended attribute directories.
1477		 */
1478
1479		if ((dzp->z_pflags & ZFS_XATTR) &&
1480		    (vap->va_type != VREG)) {
1481			if (have_acl)
1482				zfs_acl_ids_free(&acl_ids);
1483			error = SET_ERROR(EINVAL);
1484			goto out;
1485		}
1486
1487		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1488		    cr, vsecp, &acl_ids)) != 0)
1489			goto out;
1490		have_acl = B_TRUE;
1491
1492		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1493			zfs_acl_ids_free(&acl_ids);
1494			error = SET_ERROR(EDQUOT);
1495			goto out;
1496		}
1497
1498		tx = dmu_tx_create(os);
1499
1500		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1501		    ZFS_SA_BASE_ATTR_SIZE);
1502
1503		fuid_dirtied = zfsvfs->z_fuid_dirty;
1504		if (fuid_dirtied)
1505			zfs_fuid_txhold(zfsvfs, tx);
1506		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1507		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1508		if (!zfsvfs->z_use_sa &&
1509		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1510			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1511			    0, acl_ids.z_aclp->z_acl_bytes);
1512		}
1513		error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
1514		if (error) {
1515			zfs_dirent_unlock(dl);
1516			if (error == ERESTART) {
1517				waited = B_TRUE;
1518				dmu_tx_wait(tx);
1519				dmu_tx_abort(tx);
1520				goto top;
1521			}
1522			zfs_acl_ids_free(&acl_ids);
1523			dmu_tx_abort(tx);
1524			ZFS_EXIT(zfsvfs);
1525			return (error);
1526		}
1527		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1528
1529		if (fuid_dirtied)
1530			zfs_fuid_sync(zfsvfs, tx);
1531
1532		(void) zfs_link_create(dl, zp, tx, ZNEW);
1533		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1534		if (flag & FIGNORECASE)
1535			txtype |= TX_CI;
1536		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1537		    vsecp, acl_ids.z_fuidp, vap);
1538		zfs_acl_ids_free(&acl_ids);
1539		dmu_tx_commit(tx);
1540	} else {
1541		int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1542
1543		if (have_acl)
1544			zfs_acl_ids_free(&acl_ids);
1545		have_acl = B_FALSE;
1546
1547		/*
1548		 * A directory entry already exists for this name.
1549		 */
1550		/*
1551		 * Can't truncate an existing file if in exclusive mode.
1552		 */
1553		if (excl == EXCL) {
1554			error = SET_ERROR(EEXIST);
1555			goto out;
1556		}
1557		/*
1558		 * Can't open a directory for writing.
1559		 */
1560		if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
1561			error = SET_ERROR(EISDIR);
1562			goto out;
1563		}
1564		/*
1565		 * Verify requested access to file.
1566		 */
1567		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1568			goto out;
1569		}
1570
1571		mutex_enter(&dzp->z_lock);
1572		dzp->z_seq++;
1573		mutex_exit(&dzp->z_lock);
1574
1575		/*
1576		 * Truncate regular files if requested.
1577		 */
1578		if ((ZTOV(zp)->v_type == VREG) &&
1579		    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
1580			/* we can't hold any locks when calling zfs_freesp() */
1581			zfs_dirent_unlock(dl);
1582			dl = NULL;
1583			error = zfs_freesp(zp, 0, 0, mode, TRUE);
1584			if (error == 0) {
1585				vnevent_create(ZTOV(zp), ct);
1586			}
1587		}
1588	}
1589out:
1590
1591	if (dl)
1592		zfs_dirent_unlock(dl);
1593
1594	if (error) {
1595		if (zp)
1596			VN_RELE(ZTOV(zp));
1597	} else {
1598		*vpp = ZTOV(zp);
1599		error = specvp_check(vpp, cr);
1600	}
1601
1602	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1603		zil_commit(zilog, 0);
1604
1605	ZFS_EXIT(zfsvfs);
1606	return (error);
1607}
1608
1609/*
1610 * Remove an entry from a directory.
1611 *
1612 *	IN:	dvp	- vnode of directory to remove entry from.
1613 *		name	- name of entry to remove.
1614 *		cr	- credentials of caller.
1615 *		ct	- caller context
1616 *		flags	- case flags
1617 *
1618 *	RETURN:	0 on success, error code on failure.
1619 *
1620 * Timestamps:
1621 *	dvp - ctime|mtime
1622 *	 vp - ctime (if nlink > 0)
1623 */
1624
1625uint64_t null_xattr = 0;
1626
1627/*ARGSUSED*/
1628static int
1629zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
1630    int flags)
1631{
1632	znode_t		*zp, *dzp = VTOZ(dvp);
1633	znode_t		*xzp;
1634	vnode_t		*vp;
1635	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1636	zilog_t		*zilog;
1637	uint64_t	acl_obj, xattr_obj;
1638	uint64_t	xattr_obj_unlinked = 0;
1639	uint64_t	obj = 0;
1640	zfs_dirlock_t	*dl;
1641	dmu_tx_t	*tx;
1642	boolean_t	may_delete_now, delete_now = FALSE;
1643	boolean_t	unlinked, toobig = FALSE;
1644	uint64_t	txtype;
1645	pathname_t	*realnmp = NULL;
1646	pathname_t	realnm;
1647	int		error;
1648	int		zflg = ZEXISTS;
1649	boolean_t	waited = B_FALSE;
1650
1651	ZFS_ENTER(zfsvfs);
1652	ZFS_VERIFY_ZP(dzp);
1653	zilog = zfsvfs->z_log;
1654
1655	if (flags & FIGNORECASE) {
1656		zflg |= ZCILOOK;
1657		pn_alloc(&realnm);
1658		realnmp = &realnm;
1659	}
1660
1661top:
1662	xattr_obj = 0;
1663	xzp = NULL;
1664	/*
1665	 * Attempt to lock directory; fail if entry doesn't exist.
1666	 */
1667	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1668	    NULL, realnmp)) {
1669		if (realnmp)
1670			pn_free(realnmp);
1671		ZFS_EXIT(zfsvfs);
1672		return (error);
1673	}
1674
1675	vp = ZTOV(zp);
1676
1677	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1678		goto out;
1679	}
1680
1681	/*
1682	 * Need to use rmdir for removing directories.
1683	 */
1684	if (vp->v_type == VDIR) {
1685		error = SET_ERROR(EPERM);
1686		goto out;
1687	}
1688
1689	vnevent_remove(vp, dvp, name, ct);
1690
1691	if (realnmp)
1692		dnlc_remove(dvp, realnmp->pn_buf);
1693	else
1694		dnlc_remove(dvp, name);
1695
1696	mutex_enter(&vp->v_lock);
1697	may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);
1698	mutex_exit(&vp->v_lock);
1699
1700	/*
1701	 * We may delete the znode now, or we may put it in the unlinked set;
1702	 * it depends on whether we're the last link, and on whether there are
1703	 * other holds on the vnode.  So we dmu_tx_hold() the right things to
1704	 * allow for either case.
1705	 */
1706	obj = zp->z_id;
1707	tx = dmu_tx_create(zfsvfs->z_os);
1708	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1709	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1710	zfs_sa_upgrade_txholds(tx, zp);
1711	zfs_sa_upgrade_txholds(tx, dzp);
1712	if (may_delete_now) {
1713		toobig =
1714		    zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
1715		/* if the file is too big, only hold_free a token amount */
1716		dmu_tx_hold_free(tx, zp->z_id, 0,
1717		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1718	}
1719
1720	/* are there any extended attributes? */
1721	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1722	    &xattr_obj, sizeof (xattr_obj));
1723	if (error == 0 && xattr_obj) {
1724		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1725		ASSERT0(error);
1726		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1727		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1728	}
1729
1730	mutex_enter(&zp->z_lock);
1731	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1732		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1733	mutex_exit(&zp->z_lock);
1734
1735	/* charge as an update -- would be nice not to charge at all */
1736	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1737
1738	/*
1739	 * Mark this transaction as typically resulting in a net free of space
1740	 */
1741	dmu_tx_mark_netfree(tx);
1742
1743	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
1744	if (error) {
1745		zfs_dirent_unlock(dl);
1746		VN_RELE(vp);
1747		if (xzp)
1748			VN_RELE(ZTOV(xzp));
1749		if (error == ERESTART) {
1750			waited = B_TRUE;
1751			dmu_tx_wait(tx);
1752			dmu_tx_abort(tx);
1753			goto top;
1754		}
1755		if (realnmp)
1756			pn_free(realnmp);
1757		dmu_tx_abort(tx);
1758		ZFS_EXIT(zfsvfs);
1759		return (error);
1760	}
1761
1762	/*
1763	 * Remove the directory entry.
1764	 */
1765	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1766
1767	if (error) {
1768		dmu_tx_commit(tx);
1769		goto out;
1770	}
1771
1772	if (unlinked) {
1773		/*
1774		 * Hold z_lock so that we can make sure that the ACL obj
1775		 * hasn't changed.  Could have been deleted due to
1776		 * zfs_sa_upgrade().
1777		 */
1778		mutex_enter(&zp->z_lock);
1779		mutex_enter(&vp->v_lock);
1780		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1781		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1782		delete_now = may_delete_now && !toobig &&
1783		    vp->v_count == 1 && !vn_has_cached_data(vp) &&
1784		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1785		    acl_obj;
1786		mutex_exit(&vp->v_lock);
1787	}
1788
1789	if (delete_now) {
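		/*
		 * No other holds remain on the vnode, so mark any extended
		 * attribute directory unlinked and delete the znode itself
		 * now rather than deferring it to the unlinked set.
		 */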
1790		if (xattr_obj_unlinked) {
1791			ASSERT3U(xzp->z_links, ==, 2);
1792			mutex_enter(&xzp->z_lock);
1793			xzp->z_unlinked = 1;
1794			xzp->z_links = 0;
1795			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1796			    &xzp->z_links, sizeof (xzp->z_links), tx);
1797			ASSERT3U(error,  ==,  0);
1798			mutex_exit(&xzp->z_lock);
1799			zfs_unlinked_add(xzp, tx);
1800
1801			if (zp->z_is_sa)
1802				error = sa_remove(zp->z_sa_hdl,
1803				    SA_ZPL_XATTR(zfsvfs), tx);
1804			else
1805				error = sa_update(zp->z_sa_hdl,
1806				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
1807				    sizeof (uint64_t), tx);
1808			ASSERT0(error);
1809		}
1810		mutex_enter(&vp->v_lock);
1811		vp->v_count--;
1812		ASSERT0(vp->v_count);
1813		mutex_exit(&vp->v_lock);
1814		mutex_exit(&zp->z_lock);
1815		zfs_znode_delete(zp, tx);
1816	} else if (unlinked) {
1817		mutex_exit(&zp->z_lock);
1818		zfs_unlinked_add(zp, tx);
1819	}
1820
1821	txtype = TX_REMOVE;
1822	if (flags & FIGNORECASE)
1823		txtype |= TX_CI;
1824	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1825
1826	dmu_tx_commit(tx);
1827out:
1828	if (realnmp)
1829		pn_free(realnmp);
1830
1831	zfs_dirent_unlock(dl);
1832
1833	if (!delete_now)
1834		VN_RELE(vp);
1835	if (xzp)
1836		VN_RELE(ZTOV(xzp));
1837
1838	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1839		zil_commit(zilog, 0);
1840
1841	ZFS_EXIT(zfsvfs);
1842	return (error);
1843}
1844
1845/*
1846 * Create a new directory and insert it into dvp using the name
1847 * provided.  Return a pointer to the inserted directory.
1848 *
1849 *	IN:	dvp	- vnode of directory to add subdir to.
1850 *		dirname	- name of new directory.
1851 *		vap	- attributes of new directory.
1852 *		cr	- credentials of caller.
1853 *		ct	- caller context
1854 *		flags	- case flags
1855 *		vsecp	- ACL to be set
1856 *
1857 *	OUT:	vpp	- vnode of created directory.
1858 *
1859 *	RETURN:	0 on success, error code on failure.
1860 *
1861 * Timestamps:
1862 *	dvp - ctime|mtime updated
1863 *	 vp - ctime|mtime|atime updated
1864 */
1865/*ARGSUSED*/
1866static int
1867zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
1868    caller_context_t *ct, int flags, vsecattr_t *vsecp)
1869{
1870	znode_t		*zp, *dzp = VTOZ(dvp);
1871	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1872	zilog_t		*zilog;
1873	zfs_dirlock_t	*dl;
1874	uint64_t	txtype;
1875	dmu_tx_t	*tx;
1876	int		error;
1877	int		zf = ZNEW;
1878	ksid_t		*ksid;
1879	uid_t		uid;
1880	gid_t		gid = crgetgid(cr);
1881	zfs_acl_ids_t   acl_ids;
1882	boolean_t	fuid_dirtied;
1883	boolean_t	waited = B_FALSE;
1884
1885	ASSERT(vap->va_type == VDIR);
1886
1887	/*
1888	 * If we have an ephemeral id, ACL, or XVATTR then
1889	 * make sure file system is at proper version
1890	 */
1891
1892	ksid = crgetsid(cr, KSID_OWNER);
1893	if (ksid)
1894		uid = ksid_getid(ksid);
1895	else
1896		uid = crgetuid(cr);
1897	if (zfsvfs->z_use_fuids == B_FALSE &&
1898	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1899	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1900		return (SET_ERROR(EINVAL));
1901
1902	ZFS_ENTER(zfsvfs);
1903	ZFS_VERIFY_ZP(dzp);
1904	zilog = zfsvfs->z_log;
1905
1906	if (dzp->z_pflags & ZFS_XATTR) {
1907		ZFS_EXIT(zfsvfs);
1908		return (SET_ERROR(EINVAL));
1909	}
1910
1911	if (zfsvfs->z_utf8 && u8_validate(dirname,
1912	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1913		ZFS_EXIT(zfsvfs);
1914		return (SET_ERROR(EILSEQ));
1915	}
1916	if (flags & FIGNORECASE)
1917		zf |= ZCILOOK;
1918
1919	if (vap->va_mask & AT_XVATTR) {
1920		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1921		    crgetuid(cr), cr, vap->va_type)) != 0) {
1922			ZFS_EXIT(zfsvfs);
1923			return (error);
1924		}
1925	}
1926
1927	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1928	    vsecp, &acl_ids)) != 0) {
1929		ZFS_EXIT(zfsvfs);
1930		return (error);
1931	}
1932	/*
1933	 * First make sure the new directory doesn't exist.
1934	 *
1935	 * Existence is checked first to make sure we don't return
1936	 * EACCES instead of EEXIST which can cause some applications
1937	 * to fail.
1938	 */
1939top:
1940	*vpp = NULL;
1941
1942	if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1943	    NULL, NULL)) {
1944		zfs_acl_ids_free(&acl_ids);
1945		ZFS_EXIT(zfsvfs);
1946		return (error);
1947	}
1948
1949	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
1950		zfs_acl_ids_free(&acl_ids);
1951		zfs_dirent_unlock(dl);
1952		ZFS_EXIT(zfsvfs);
1953		return (error);
1954	}
1955
1956	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1957		zfs_acl_ids_free(&acl_ids);
1958		zfs_dirent_unlock(dl);
1959		ZFS_EXIT(zfsvfs);
1960		return (SET_ERROR(EDQUOT));
1961	}
1962
1963	/*
1964	 * Add a new entry to the directory.
1965	 */
1966	tx = dmu_tx_create(zfsvfs->z_os);
1967	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1968	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1969	fuid_dirtied = zfsvfs->z_fuid_dirty;
1970	if (fuid_dirtied)
1971		zfs_fuid_txhold(zfsvfs, tx);
1972	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1973		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1974		    acl_ids.z_aclp->z_acl_bytes);
1975	}
1976
1977	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1978	    ZFS_SA_BASE_ATTR_SIZE);
1979
1980	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
1981	if (error) {
1982		zfs_dirent_unlock(dl);
1983		if (error == ERESTART) {
1984			waited = B_TRUE;
1985			dmu_tx_wait(tx);
1986			dmu_tx_abort(tx);
1987			goto top;
1988		}
1989		zfs_acl_ids_free(&acl_ids);
1990		dmu_tx_abort(tx);
1991		ZFS_EXIT(zfsvfs);
1992		return (error);
1993	}
1994
1995	/*
1996	 * Create new node.
1997	 */
1998	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1999
2000	if (fuid_dirtied)
2001		zfs_fuid_sync(zfsvfs, tx);
2002
2003	/*
2004	 * Now put new name in parent dir.
2005	 */
2006	(void) zfs_link_create(dl, zp, tx, ZNEW);
2007
2008	*vpp = ZTOV(zp);
2009
2010	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2011	if (flags & FIGNORECASE)
2012		txtype |= TX_CI;
2013	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2014	    acl_ids.z_fuidp, vap);
2015
2016	zfs_acl_ids_free(&acl_ids);
2017
2018	dmu_tx_commit(tx);
2019
2020	zfs_dirent_unlock(dl);
2021
2022	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2023		zil_commit(zilog, 0);
2024
2025	ZFS_EXIT(zfsvfs);
2026	return (0);
2027}
2028
2029/*
2030 * Remove a directory subdir entry.  If the current working
2031 * directory is the same as the subdir to be removed, the
2032 * remove will fail.
2033 *
2034 *	IN:	dvp	- vnode of directory to remove from.
2035 *		name	- name of directory to be removed.
2036 *		cwd	- vnode of current working directory.
2037 *		cr	- credentials of caller.
2038 *		ct	- caller context
2039 *		flags	- case flags
2040 *
2041 *	RETURN:	0 on success, error code on failure.
2042 *
2043 * Timestamps:
2044 *	dvp - ctime|mtime updated
2045 */
2046/*ARGSUSED*/
2047static int
2048zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
2049    caller_context_t *ct, int flags)
2050{
2051	znode_t		*dzp = VTOZ(dvp);
2052	znode_t		*zp;
2053	vnode_t		*vp;
2054	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
2055	zilog_t		*zilog;
2056	zfs_dirlock_t	*dl;
2057	dmu_tx_t	*tx;
2058	int		error;
2059	int		zflg = ZEXISTS;
2060	boolean_t	waited = B_FALSE;
2061
2062	ZFS_ENTER(zfsvfs);
2063	ZFS_VERIFY_ZP(dzp);
2064	zilog = zfsvfs->z_log;
2065
2066	if (flags & FIGNORECASE)
2067		zflg |= ZCILOOK;
2068top:
2069	zp = NULL;
2070
2071	/*
2072	 * Attempt to lock directory; fail if entry doesn't exist.
2073	 */
2074	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2075	    NULL, NULL)) {
2076		ZFS_EXIT(zfsvfs);
2077		return (error);
2078	}
2079
2080	vp = ZTOV(zp);
2081
2082	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
2083		goto out;
2084	}
2085
2086	if (vp->v_type != VDIR) {
2087		error = SET_ERROR(ENOTDIR);
2088		goto out;
2089	}
2090
2091	if (vp == cwd) {
2092		error = SET_ERROR(EINVAL);
2093		goto out;
2094	}
2095
2096	vnevent_rmdir(vp, dvp, name, ct);
2097
2098	/*
2099	 * Grab a lock on the directory to make sure that no one is
2100	 * trying to add (or lookup) entries while we are removing it.
2101	 */
2102	rw_enter(&zp->z_name_lock, RW_WRITER);
2103
2104	/*
2105	 * Grab a lock on the parent pointer to make sure we play well
2106	 * with the treewalk and directory rename code.
2107	 */
2108	rw_enter(&zp->z_parent_lock, RW_WRITER);
2109
2110	tx = dmu_tx_create(zfsvfs->z_os);
2111	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2112	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2113	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2114	zfs_sa_upgrade_txholds(tx, zp);
2115	zfs_sa_upgrade_txholds(tx, dzp);
2116	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
2117	if (error) {
2118		rw_exit(&zp->z_parent_lock);
2119		rw_exit(&zp->z_name_lock);
2120		zfs_dirent_unlock(dl);
2121		VN_RELE(vp);
2122		if (error == ERESTART) {
2123			waited = B_TRUE;
2124			dmu_tx_wait(tx);
2125			dmu_tx_abort(tx);
2126			goto top;
2127		}
2128		dmu_tx_abort(tx);
2129		ZFS_EXIT(zfsvfs);
2130		return (error);
2131	}
2132
2133	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2134
2135	if (error == 0) {
2136		uint64_t txtype = TX_RMDIR;
2137		if (flags & FIGNORECASE)
2138			txtype |= TX_CI;
2139		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2140	}
2141
2142	dmu_tx_commit(tx);
2143
2144	rw_exit(&zp->z_parent_lock);
2145	rw_exit(&zp->z_name_lock);
2146out:
2147	zfs_dirent_unlock(dl);
2148
2149	VN_RELE(vp);
2150
2151	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2152		zil_commit(zilog, 0);
2153
2154	ZFS_EXIT(zfsvfs);
2155	return (error);
2156}
2157
2158/*
2159 * Read as many directory entries as will fit into the provided
2160 * buffer from the given directory cursor position (specified in
2161 * the uio structure).
2162 *
2163 *	IN:	vp	- vnode of directory to read.
2164 *		uio	- structure supplying read location, range info,
2165 *			  and return buffer.
2166 *		cr	- credentials of caller.
2167 *		ct	- caller context
2168 *		flags	- case flags
2169 *
2170 *	OUT:	uio	- updated offset and range, buffer filled.
2171 *		eofp	- set to true if end-of-file detected.
2172 *
2173 *	RETURN:	0 on success, error code on failure.
2174 *
2175 * Timestamps:
2176 *	vp - atime updated
2177 *
2178 * Note that the low 4 bits of the cookie returned by zap are always zero.
2179 * This allows us to use the low range for "special" directory entries:
2180 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
2181 * we use the offset 2 for the '.zfs' directory.
2182 */
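/*
 * A resumed readdir works roughly as follows: each entry we emit records,
 * in its d_off (or ed_off) field, either one of the special offsets above
 * or a position produced by zap_cursor_serialize().  When the caller comes
 * back with that offset, zap_cursor_init_serialized() repositions the
 * iteration exactly where the previous call left off.
 */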
2183/* ARGSUSED */
2184static int
2185zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
2186    caller_context_t *ct, int flags)
2187{
2188	znode_t		*zp = VTOZ(vp);
2189	iovec_t		*iovp;
2190	edirent_t	*eodp;
2191	dirent64_t	*odp;
2192	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2193	objset_t	*os;
2194	caddr_t		outbuf;
2195	size_t		bufsize;
2196	zap_cursor_t	zc;
2197	zap_attribute_t	zap;
2198	uint_t		bytes_wanted;
2199	uint64_t	offset; /* must be unsigned; checks for < 1 */
2200	uint64_t	parent;
2201	int		local_eof;
2202	int		outcount;
2203	int		error;
2204	uint8_t		prefetch;
2205	boolean_t	check_sysattrs;
2206
2207	ZFS_ENTER(zfsvfs);
2208	ZFS_VERIFY_ZP(zp);
2209
2210	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2211	    &parent, sizeof (parent))) != 0) {
2212		ZFS_EXIT(zfsvfs);
2213		return (error);
2214	}
2215
2216	/*
2217	 * If we are not given an eof variable,
2218	 * use a local one.
2219	 */
2220	if (eofp == NULL)
2221		eofp = &local_eof;
2222
2223	/*
2224	 * Check for valid iov_len.
2225	 */
2226	if (uio->uio_iov->iov_len <= 0) {
2227		ZFS_EXIT(zfsvfs);
2228		return (SET_ERROR(EINVAL));
2229	}
2230
2231	/*
2232	 * Quit if the directory has been removed (POSIX).
2233	 */
2234	if ((*eofp = zp->z_unlinked) != 0) {
2235		ZFS_EXIT(zfsvfs);
2236		return (0);
2237	}
2238
2239	error = 0;
2240	os = zfsvfs->z_os;
2241	offset = uio->uio_loffset;
2242	prefetch = zp->z_zn_prefetch;
2243
2244	/*
2245	 * Initialize the iterator cursor.
2246	 */
2247	if (offset <= 3) {
2248		/*
2249		 * Start iteration from the beginning of the directory.
2250		 */
2251		zap_cursor_init(&zc, os, zp->z_id);
2252	} else {
2253		/*
2254		 * The offset is a serialized cursor.
2255		 */
2256		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2257	}
2258
2259	/*
2260	 * Get space to change directory entries into fs independent format.
2261	 */
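	/*
	 * If the caller passed a single kernel-space iovec we format the
	 * entries directly into it; otherwise we stage them in a temporary
	 * buffer and uiomove() the result out once the loop completes.
	 */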
2262	iovp = uio->uio_iov;
2263	bytes_wanted = iovp->iov_len;
2264	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
2265		bufsize = bytes_wanted;
2266		outbuf = kmem_alloc(bufsize, KM_SLEEP);
2267		odp = (struct dirent64 *)outbuf;
2268	} else {
2269		bufsize = bytes_wanted;
2270		outbuf = NULL;
2271		odp = (struct dirent64 *)iovp->iov_base;
2272	}
2273	eodp = (struct edirent *)odp;
2274
2275	/*
2276	 * If this VFS supports the system attribute view interface, and
2277	 * we're looking at an extended attribute directory, and we care
2278	 * about normalization conflicts on this vfs, then we must check
2279	 * for normalization conflicts with the sysattr name space.
2280	 */
2281	check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2282	    (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
2283	    (flags & V_RDDIR_ENTFLAGS);
2284
2285	/*
2286	 * Transform to file-system independent format
2287	 */
2288	outcount = 0;
2289	while (outcount < bytes_wanted) {
2290		ino64_t objnum;
2291		ushort_t reclen;
2292		off64_t *next = NULL;
2293
2294		/*
2295		 * Special case `.', `..', and `.zfs'.
2296		 */
2297		if (offset == 0) {
2298			(void) strcpy(zap.za_name, ".");
2299			zap.za_normalization_conflict = 0;
2300			objnum = zp->z_id;
2301		} else if (offset == 1) {
2302			(void) strcpy(zap.za_name, "..");
2303			zap.za_normalization_conflict = 0;
2304			objnum = parent;
2305		} else if (offset == 2 && zfs_show_ctldir(zp)) {
2306			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2307			zap.za_normalization_conflict = 0;
2308			objnum = ZFSCTL_INO_ROOT;
2309		} else {
2310			/*
2311			 * Grab next entry.
2312			 */
2313			if (error = zap_cursor_retrieve(&zc, &zap)) {
2314				if ((*eofp = (error == ENOENT)) != 0)
2315					break;
2316				else
2317					goto update;
2318			}
2319
2320			if (zap.za_integer_length != 8 ||
2321			    zap.za_num_integers != 1) {
2322				cmn_err(CE_WARN, "zap_readdir: bad directory "
2323				    "entry, obj = %lld, offset = %lld\n",
2324				    (u_longlong_t)zp->z_id,
2325				    (u_longlong_t)offset);
2326				error = SET_ERROR(ENXIO);
2327				goto update;
2328			}
2329
2330			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2331			/*
2332			 * MacOS X can extract the object type here such as:
2333			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2334			 */
2335
2336			if (check_sysattrs && !zap.za_normalization_conflict) {
2337				zap.za_normalization_conflict =
2338				    xattr_sysattr_casechk(zap.za_name);
2339			}
2340		}
2341
2342		if (flags & V_RDDIR_ACCFILTER) {
2343			/*
2344			 * If we have no access at all, don't include
2345			 * this entry in the returned information
2346			 */
2347			znode_t	*ezp;
2348			if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
2349				goto skip_entry;
2350			if (!zfs_has_access(ezp, cr)) {
2351				VN_RELE(ZTOV(ezp));
2352				goto skip_entry;
2353			}
2354			VN_RELE(ZTOV(ezp));
2355		}
2356
2357		if (flags & V_RDDIR_ENTFLAGS)
2358			reclen = EDIRENT_RECLEN(strlen(zap.za_name));
2359		else
2360			reclen = DIRENT64_RECLEN(strlen(zap.za_name));
2361
2362		/*
2363		 * Will this entry fit in the buffer?
2364		 */
2365		if (outcount + reclen > bufsize) {
2366			/*
2367			 * Did we manage to fit anything in the buffer?
2368			 */
2369			if (!outcount) {
2370				error = SET_ERROR(EINVAL);
2371				goto update;
2372			}
2373			break;
2374		}
2375		if (flags & V_RDDIR_ENTFLAGS) {
2376			/*
2377			 * Add extended flag entry:
2378			 */
2379			eodp->ed_ino = objnum;
2380			eodp->ed_reclen = reclen;
2381			/* NOTE: ed_off is the offset for the *next* entry */
2382			next = &(eodp->ed_off);
2383			eodp->ed_eflags = zap.za_normalization_conflict ?
2384			    ED_CASE_CONFLICT : 0;
2385			(void) strncpy(eodp->ed_name, zap.za_name,
2386			    EDIRENT_NAMELEN(reclen));
2387			eodp = (edirent_t *)((intptr_t)eodp + reclen);
2388		} else {
2389			/*
2390			 * Add normal entry:
2391			 */
2392			odp->d_ino = objnum;
2393			odp->d_reclen = reclen;
2394			/* NOTE: d_off is the offset for the *next* entry */
2395			next = &(odp->d_off);
2396			(void) strncpy(odp->d_name, zap.za_name,
2397			    DIRENT64_NAMELEN(reclen));
2398			odp = (dirent64_t *)((intptr_t)odp + reclen);
2399		}
2400		outcount += reclen;
2401
2402		ASSERT(outcount <= bufsize);
2403
2404		/* Prefetch znode */
2405		if (prefetch)
2406			dmu_prefetch(os, objnum, 0, 0, 0,
2407			    ZIO_PRIORITY_SYNC_READ);
2408
2409	skip_entry:
2410		/*
2411		 * Move to the next entry, fill in the previous offset.
2412		 */
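		/*
		 * The synthetic entries (".", "..", and ".zfs") simply bump
		 * the offset by one; ordinary entries advance the ZAP cursor
		 * and record its serialized position as the new offset.
		 */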
2413		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2414			zap_cursor_advance(&zc);
2415			offset = zap_cursor_serialize(&zc);
2416		} else {
2417			offset += 1;
2418		}
2419		if (next)
2420			*next = offset;
2421	}
2422	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2423
2424	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
2425		iovp->iov_base += outcount;
2426		iovp->iov_len -= outcount;
2427		uio->uio_resid -= outcount;
2428	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
2429		/*
2430		 * Reset the pointer.
2431		 */
2432		offset = uio->uio_loffset;
2433	}
2434
2435update:
2436	zap_cursor_fini(&zc);
2437	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
2438		kmem_free(outbuf, bufsize);
2439
2440	if (error == ENOENT)
2441		error = 0;
2442
2443	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
2444
2445	uio->uio_loffset = offset;
2446	ZFS_EXIT(zfsvfs);
2447	return (error);
2448}
2449
2450ulong_t zfs_fsync_sync_cnt = 4;
2451
2452static int
2453zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
2454{
2455	znode_t	*zp = VTOZ(vp);
2456	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2457
2458	/*
2459	 * Regardless of whether this is required for standards conformance,
2460	 * this is the logical behavior when fsync() is called on a file with
2461	 * dirty pages.  We use B_ASYNC since the ZIL transactions are already
2462	 * going to be pushed out as part of the zil_commit().
2463	 */
2464	if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
2465	    (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
2466		(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct);
2467
2468	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2469
2470	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2471		ZFS_ENTER(zfsvfs);
2472		ZFS_VERIFY_ZP(zp);
2473		zil_commit(zfsvfs->z_log, zp->z_id);
2474		ZFS_EXIT(zfsvfs);
2475	}
2476	return (0);
2477}
2478
2479
2480/*
2481 * Get the requested file attributes and place them in the provided
2482 * vattr structure.
2483 *
2484 *	IN:	vp	- vnode of file.
2485 *		vap	- va_mask identifies requested attributes.
2486 *			  If AT_XVATTR set, then optional attrs are requested
2487 *		flags	- ATTR_NOACLCHECK (CIFS server context)
2488 *		cr	- credentials of caller.
2489 *		ct	- caller context
2490 *
2491 *	OUT:	vap	- attribute values.
2492 *
2493 *	RETURN:	0 (always succeeds).
2494 */
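/*
 * A caller that wants one of the optional attributes below passes an
 * xvattr_t rather than a plain vattr_t.  A minimal, illustrative sketch of
 * such a request:
 *
 *	xvattr_t xva;
 *
 *	xva_init(&xva);				(va_mask becomes AT_XVATTR)
 *	XVA_SET_REQ(&xva, XAT_READONLY);
 *	error = VOP_GETATTR(vp, &xva.xva_vattr, 0, cr, ct);
 *
 * Attributes actually returned are flagged with XVA_SET_RTN() below and
 * can be tested by the caller with XVA_ISSET_RTN().
 */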
2495/* ARGSUSED */
2496static int
2497zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2498    caller_context_t *ct)
2499{
2500	znode_t *zp = VTOZ(vp);
2501	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2502	int	error = 0;
2503	uint64_t links;
2504	uint64_t mtime[2], ctime[2];
2505	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2506	xoptattr_t *xoap = NULL;
2507	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2508	sa_bulk_attr_t bulk[2];
2509	int count = 0;
2510
2511	ZFS_ENTER(zfsvfs);
2512	ZFS_VERIFY_ZP(zp);
2513
2514	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2515
2516	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2517	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2518
2519	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2520		ZFS_EXIT(zfsvfs);
2521		return (error);
2522	}
2523
2524	/*
2525	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2526	 * Also, if we are the owner don't bother, since owner should
2527	 * always be allowed to read basic attributes of file.
2528	 */
2529	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2530	    (vap->va_uid != crgetuid(cr))) {
2531		if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2532		    skipaclchk, cr)) {
2533			ZFS_EXIT(zfsvfs);
2534			return (error);
2535		}
2536	}
2537
2538	/*
2539	 * Return all attributes.  It's cheaper to provide the answer
2540	 * than to determine whether we were asked the question.
2541	 */
2542
2543	mutex_enter(&zp->z_lock);
2544	vap->va_type = vp->v_type;
2545	vap->va_mode = zp->z_mode & MODEMASK;
2546	vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
2547	vap->va_nodeid = zp->z_id;
2548	if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
2549		links = zp->z_links + 1;
2550	else
2551		links = zp->z_links;
2552	vap->va_nlink = MIN(links, UINT32_MAX);	/* nlink_t limit! */
2553	vap->va_size = zp->z_size;
2554	vap->va_rdev = vp->v_rdev;
2555	vap->va_seq = zp->z_seq;
2556
2557	/*
2558	 * Add in any requested optional attributes and the create time.
2559	 * Also set the corresponding bits in the returned attribute bitmap.
2560	 */
2561	if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2562		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2563			xoap->xoa_archive =
2564			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2565			XVA_SET_RTN(xvap, XAT_ARCHIVE);
2566		}
2567
2568		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2569			xoap->xoa_readonly =
2570			    ((zp->z_pflags & ZFS_READONLY) != 0);
2571			XVA_SET_RTN(xvap, XAT_READONLY);
2572		}
2573
2574		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2575			xoap->xoa_system =
2576			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
2577			XVA_SET_RTN(xvap, XAT_SYSTEM);
2578		}
2579
2580		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2581			xoap->xoa_hidden =
2582			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
2583			XVA_SET_RTN(xvap, XAT_HIDDEN);
2584		}
2585
2586		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2587			xoap->xoa_nounlink =
2588			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2589			XVA_SET_RTN(xvap, XAT_NOUNLINK);
2590		}
2591
2592		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2593			xoap->xoa_immutable =
2594			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2595			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2596		}
2597
2598		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2599			xoap->xoa_appendonly =
2600			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2601			XVA_SET_RTN(xvap, XAT_APPENDONLY);
2602		}
2603
2604		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2605			xoap->xoa_nodump =
2606			    ((zp->z_pflags & ZFS_NODUMP) != 0);
2607			XVA_SET_RTN(xvap, XAT_NODUMP);
2608		}
2609
2610		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2611			xoap->xoa_opaque =
2612			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
2613			XVA_SET_RTN(xvap, XAT_OPAQUE);
2614		}
2615
2616		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2617			xoap->xoa_av_quarantined =
2618			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2619			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2620		}
2621
2622		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2623			xoap->xoa_av_modified =
2624			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2625			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2626		}
2627
2628		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2629		    vp->v_type == VREG) {
2630			zfs_sa_get_scanstamp(zp, xvap);
2631		}
2632
2633		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2634			uint64_t times[2];
2635
2636			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2637			    times, sizeof (times));
2638			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2639			XVA_SET_RTN(xvap, XAT_CREATETIME);
2640		}
2641
2642		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2643			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2644			XVA_SET_RTN(xvap, XAT_REPARSE);
2645		}
2646		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2647			xoap->xoa_generation = zp->z_gen;
2648			XVA_SET_RTN(xvap, XAT_GEN);
2649		}
2650
2651		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2652			xoap->xoa_offline =
2653			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
2654			XVA_SET_RTN(xvap, XAT_OFFLINE);
2655		}
2656
2657		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2658			xoap->xoa_sparse =
2659			    ((zp->z_pflags & ZFS_SPARSE) != 0);
2660			XVA_SET_RTN(xvap, XAT_SPARSE);
2661		}
2662	}
2663
2664	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2665	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2666	ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2667
2668	mutex_exit(&zp->z_lock);
2669
2670	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2671
2672	if (zp->z_blksz == 0) {
2673		/*
2674		 * Block size hasn't been set; suggest maximal I/O transfers.
2675		 */
2676		vap->va_blksize = zfsvfs->z_max_blksz;
2677	}
2678
2679	ZFS_EXIT(zfsvfs);
2680	return (0);
2681}
2682
2683/*
2684 * Set the file attributes to the values contained in the
2685 * vattr structure.
2686 *
2687 *	IN:	vp	- vnode of file to be modified.
2688 *		vap	- new attribute values.
2689 *			  If AT_XVATTR set, then optional attrs are being set
2690 *		flags	- ATTR_UTIME set if non-default time values provided.
2691 *			- ATTR_NOACLCHECK (CIFS context only).
2692 *		cr	- credentials of caller.
2693 *		ct	- caller context
2694 *
2695 *	RETURN:	0 on success, error code on failure.
2696 *
2697 * Timestamps:
2698 *	vp - ctime updated, mtime updated if size changed.
2699 */
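/*
 * Rough flow of the function below: validate the request and permissions,
 * map any new owner/group to FUIDs and check quotas, construct a single
 * DMU transaction covering the znode (and its extended attribute directory,
 * if one exists), apply the requested attributes while holding z_lock (and
 * z_acl_lock for uid/gid/mode changes), log the change, and commit.
 */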
2700/* ARGSUSED */
2701static int
2702zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2703    caller_context_t *ct)
2704{
2705	znode_t		*zp = VTOZ(vp);
2706	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2707	zilog_t		*zilog;
2708	dmu_tx_t	*tx;
2709	vattr_t		oldva;
2710	xvattr_t	tmpxvattr;
2711	uint_t		mask = vap->va_mask;
2712	uint_t		saved_mask = 0;
2713	int		trim_mask = 0;
2714	uint64_t	new_mode;
2715	uint64_t	new_uid, new_gid;
2716	uint64_t	xattr_obj;
2717	uint64_t	mtime[2], ctime[2];
2718	znode_t		*attrzp;
2719	int		need_policy = FALSE;
2720	int		err, err2;
2721	zfs_fuid_info_t *fuidp = NULL;
2722	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2723	xoptattr_t	*xoap;
2724	zfs_acl_t	*aclp;
2725	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2726	boolean_t	fuid_dirtied = B_FALSE;
2727	sa_bulk_attr_t	bulk[7], xattr_bulk[7];
2728	int		count = 0, xattr_count = 0;
2729
2730	if (mask == 0)
2731		return (0);
2732
2733	if (mask & AT_NOSET)
2734		return (SET_ERROR(EINVAL));
2735
2736	ZFS_ENTER(zfsvfs);
2737	ZFS_VERIFY_ZP(zp);
2738
2739	zilog = zfsvfs->z_log;
2740
2741	/*
2742	 * Make sure that if an ephemeral uid/gid or an xvattr is specified,
2743	 * the file system is at the proper version level.
2744	 */
2745
2746	if (zfsvfs->z_use_fuids == B_FALSE &&
2747	    (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2748	    ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2749	    (mask & AT_XVATTR))) {
2750		ZFS_EXIT(zfsvfs);
2751		return (SET_ERROR(EINVAL));
2752	}
2753
2754	if (mask & AT_SIZE && vp->v_type == VDIR) {
2755		ZFS_EXIT(zfsvfs);
2756		return (SET_ERROR(EISDIR));
2757	}
2758
2759	if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
2760		ZFS_EXIT(zfsvfs);
2761		return (SET_ERROR(EINVAL));
2762	}
2763
2764	/*
2765	 * If this is an xvattr_t, then get a pointer to the structure of
2766	 * optional attributes.  If this is NULL, then we have a vattr_t.
2767	 */
2768	xoap = xva_getxoptattr(xvap);
2769
2770	xva_init(&tmpxvattr);
2771
2772	/*
2773	 * On immutable files, only the immutable bit and atime may be altered.
2774	 */
2775	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2776	    ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2777	    ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2778		ZFS_EXIT(zfsvfs);
2779		return (SET_ERROR(EPERM));
2780	}
2781
2782	if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2783		ZFS_EXIT(zfsvfs);
2784		return (SET_ERROR(EPERM));
2785	}
2786
2787	/*
2788	 * Verify the timestamps don't overflow 32 bits.
2789	 * ZFS can handle large timestamps, but 32-bit syscalls can't
2790	 * handle times beyond January 2038.  This check should be removed
2791	 * once large timestamps are fully supported.
2792	 */
2793	if (mask & (AT_ATIME | AT_MTIME)) {
2794		if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2795		    ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2796			ZFS_EXIT(zfsvfs);
2797			return (SET_ERROR(EOVERFLOW));
2798		}
2799	}
2800
2801top:
2802	attrzp = NULL;
2803	aclp = NULL;
2804
2805	/* Can this be moved to before the top label? */
2806	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2807		ZFS_EXIT(zfsvfs);
2808		return (SET_ERROR(EROFS));
2809	}
2810
2811	/*
2812	 * First validate permissions
2813	 */
2814
2815	if (mask & AT_SIZE) {
2816		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2817		if (err) {
2818			ZFS_EXIT(zfsvfs);
2819			return (err);
2820		}
2821		/*
2822		 * XXX - Note, we are not providing any open
2823		 * mode flags here (like FNDELAY), so we may
2824		 * block if there are locks present... this
2825		 * should be addressed in openat().
2826		 */
2827		/* XXX - would it be OK to generate a log record here? */
2828		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2829		if (err) {
2830			ZFS_EXIT(zfsvfs);
2831			return (err);
2832		}
2833
2834		if (vap->va_size == 0)
2835			vnevent_truncate(ZTOV(zp), ct);
2836	}
2837
2838	if (mask & (AT_ATIME|AT_MTIME) ||
2839	    ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2840	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2841	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2842	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2843	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2844	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2845	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2846		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2847		    skipaclchk, cr);
2848	}
2849
2850	if (mask & (AT_UID|AT_GID)) {
2851		int	idmask = (mask & (AT_UID|AT_GID));
2852		int	take_owner;
2853		int	take_group;
2854
2855		/*
2856		 * NOTE: even if a new mode is being set,
2857		 * we may clear S_ISUID/S_ISGID bits.
2858		 */
2859
2860		if (!(mask & AT_MODE))
2861			vap->va_mode = zp->z_mode;
2862
2863		/*
2864		 * Take ownership or chgrp to group we are a member of
2865		 */
2866
2867		take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2868		take_group = (mask & AT_GID) &&
2869		    zfs_groupmember(zfsvfs, vap->va_gid, cr);
2870
2871		/*
2872		 * If both AT_UID and AT_GID are set then take_owner and
2873		 * take_group must both be set in order to allow taking
2874		 * ownership.
2875		 *
2876		 * Otherwise, send the check through secpolicy_vnode_setattr()
2877		 *
2878		 */
2879
2880		if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2881		    ((idmask == AT_UID) && take_owner) ||
2882		    ((idmask == AT_GID) && take_group)) {
2883			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2884			    skipaclchk, cr) == 0) {
2885				/*
2886				 * Remove setuid/setgid for non-privileged users
2887				 */
2888				secpolicy_setid_clear(vap, cr);
2889				trim_mask = (mask & (AT_UID|AT_GID));
2890			} else {
2891				need_policy =  TRUE;
2892			}
2893		} else {
2894			need_policy =  TRUE;
2895		}
2896	}
2897
2898	mutex_enter(&zp->z_lock);
2899	oldva.va_mode = zp->z_mode;
2900	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2901	if (mask & AT_XVATTR) {
2902		/*
2903		 * Update xvattr mask to include only those attributes
2904		 * that are actually changing.
2905		 * The bits will be restored prior to actually setting
2906		 * the attributes, so the caller thinks they were set.
2907		 * the attributes so the caller thinks they were set.
2908		 */
2909		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2910			if (xoap->xoa_appendonly !=
2911			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2912				need_policy = TRUE;
2913			} else {
2914				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2915				XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
2916			}
2917		}
2918
2919		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2920			if (xoap->xoa_nounlink !=
2921			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2922				need_policy = TRUE;
2923			} else {
2924				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2925				XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
2926			}
2927		}
2928
2929		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2930			if (xoap->xoa_immutable !=
2931			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2932				need_policy = TRUE;
2933			} else {
2934				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2935				XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
2936			}
2937		}
2938
2939		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2940			if (xoap->xoa_nodump !=
2941			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2942				need_policy = TRUE;
2943			} else {
2944				XVA_CLR_REQ(xvap, XAT_NODUMP);
2945				XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
2946			}
2947		}
2948
2949		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2950			if (xoap->xoa_av_modified !=
2951			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2952				need_policy = TRUE;
2953			} else {
2954				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2955				XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
2956			}
2957		}
2958
2959		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2960			if ((vp->v_type != VREG &&
2961			    xoap->xoa_av_quarantined) ||
2962			    xoap->xoa_av_quarantined !=
2963			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2964				need_policy = TRUE;
2965			} else {
2966				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2967				XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
2968			}
2969		}
2970
2971		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2972			mutex_exit(&zp->z_lock);
2973			ZFS_EXIT(zfsvfs);
2974			return (SET_ERROR(EPERM));
2975		}
2976
2977		if (need_policy == FALSE &&
2978		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2979		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2980			need_policy = TRUE;
2981		}
2982	}
2983
2984	mutex_exit(&zp->z_lock);
2985
2986	if (mask & AT_MODE) {
2987		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2988			err = secpolicy_setid_setsticky_clear(vp, vap,
2989			    &oldva, cr);
2990			if (err) {
2991				ZFS_EXIT(zfsvfs);
2992				return (err);
2993			}
2994			trim_mask |= AT_MODE;
2995		} else {
2996			need_policy = TRUE;
2997		}
2998	}
2999
3000	if (need_policy) {
3001		/*
3002		 * If trim_mask is set then take ownership
3003		 * has been granted or write_acl is present and the user
3004		 * has the ability to modify the mode.  In that case remove
3005		 * UID|GID and/or MODE from the mask so that
3006		 * secpolicy_vnode_setattr() doesn't revoke it.
3007		 */
3008
3009		if (trim_mask) {
3010			saved_mask = vap->va_mask;
3011			vap->va_mask &= ~trim_mask;
3012		}
3013		err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
3014		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3015		if (err) {
3016			ZFS_EXIT(zfsvfs);
3017			return (err);
3018		}
3019
3020		if (trim_mask)
3021			vap->va_mask |= saved_mask;
3022	}
3023
3024	/*
3025	 * secpolicy_vnode_setattr, or take ownership may have
3026	 * changed va_mask
3027	 */
3028	mask = vap->va_mask;
3029
3030	if ((mask & (AT_UID | AT_GID))) {
3031		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3032		    &xattr_obj, sizeof (xattr_obj));
3033
3034		if (err == 0 && xattr_obj) {
3035			err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
3036			if (err)
3037				goto out2;
3038		}
3039		if (mask & AT_UID) {
3040			new_uid = zfs_fuid_create(zfsvfs,
3041			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3042			if (new_uid != zp->z_uid &&
3043			    zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
3044				if (attrzp)
3045					VN_RELE(ZTOV(attrzp));
3046				err = SET_ERROR(EDQUOT);
3047				goto out2;
3048			}
3049		}
3050
3051		if (mask & AT_GID) {
3052			new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
3053			    cr, ZFS_GROUP, &fuidp);
3054			if (new_gid != zp->z_gid &&
3055			    zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
3056				if (attrzp)
3057					VN_RELE(ZTOV(attrzp));
3058				err = SET_ERROR(EDQUOT);
3059				goto out2;
3060			}
3061		}
3062	}
3063	tx = dmu_tx_create(zfsvfs->z_os);
3064
3065	if (mask & AT_MODE) {
3066		uint64_t pmode = zp->z_mode;
3067		uint64_t acl_obj;
3068		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3069
3070		if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
3071		    !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
3072			err = SET_ERROR(EPERM);
3073			goto out;
3074		}
3075
3076		if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
3077			goto out;
3078
3079		mutex_enter(&zp->z_lock);
3080		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3081			/*
3082			 * Are we upgrading ACL from old V0 format
3083			 * to V1 format?
3084			 */
3085			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3086			    zfs_znode_acl_version(zp) ==
3087			    ZFS_ACL_VERSION_INITIAL) {
3088				dmu_tx_hold_free(tx, acl_obj, 0,
3089				    DMU_OBJECT_END);
3090				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3091				    0, aclp->z_acl_bytes);
3092			} else {
3093				dmu_tx_hold_write(tx, acl_obj, 0,
3094				    aclp->z_acl_bytes);
3095			}
3096		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3097			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3098			    0, aclp->z_acl_bytes);
3099		}
3100		mutex_exit(&zp->z_lock);
3101		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3102	} else {
3103		if ((mask & AT_XVATTR) &&
3104		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3105			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3106		else
3107			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3108	}
3109
3110	if (attrzp) {
3111		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3112	}
3113
3114	fuid_dirtied = zfsvfs->z_fuid_dirty;
3115	if (fuid_dirtied)
3116		zfs_fuid_txhold(zfsvfs, tx);
3117
3118	zfs_sa_upgrade_txholds(tx, zp);
3119
3120	err = dmu_tx_assign(tx, TXG_WAIT);
3121	if (err)
3122		goto out;
3123
3124	count = 0;
3125	/*
3126	 * Set each attribute requested.
3127	 * We group settings according to the locks they need to acquire.
3128	 *
3129	 * Note: you cannot set ctime directly, although it will be
3130	 * updated as a side-effect of calling this function.
3131	 */
3132
3133
3134	if (mask & (AT_UID|AT_GID|AT_MODE))
3135		mutex_enter(&zp->z_acl_lock);
3136	mutex_enter(&zp->z_lock);
3137
3138	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3139	    &zp->z_pflags, sizeof (zp->z_pflags));
3140
3141	if (attrzp) {
3142		if (mask & (AT_UID|AT_GID|AT_MODE))
3143			mutex_enter(&attrzp->z_acl_lock);
3144		mutex_enter(&attrzp->z_lock);
3145		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3146		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3147		    sizeof (attrzp->z_pflags));
3148	}
3149
3150	if (mask & (AT_UID|AT_GID)) {
3151
3152		if (mask & AT_UID) {
3153			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3154			    &new_uid, sizeof (new_uid));
3155			zp->z_uid = new_uid;
3156			if (attrzp) {
3157				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3158				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3159				    sizeof (new_uid));
3160				attrzp->z_uid = new_uid;
3161			}
3162		}
3163
3164		if (mask & AT_GID) {
3165			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3166			    NULL, &new_gid, sizeof (new_gid));
3167			zp->z_gid = new_gid;
3168			if (attrzp) {
3169				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3170				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3171				    sizeof (new_gid));
3172				attrzp->z_gid = new_gid;
3173			}
3174		}
3175		if (!(mask & AT_MODE)) {
3176			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3177			    NULL, &new_mode, sizeof (new_mode));
3178			new_mode = zp->z_mode;
3179		}
3180		err = zfs_acl_chown_setattr(zp);
3181		ASSERT(err == 0);
3182		if (attrzp) {
3183			err = zfs_acl_chown_setattr(attrzp);
3184			ASSERT(err == 0);
3185		}
3186	}
3187
3188	if (mask & AT_MODE) {
3189		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3190		    &new_mode, sizeof (new_mode));
3191		zp->z_mode = new_mode;
3192		ASSERT3U((uintptr_t)aclp, !=, NULL);
3193		err = zfs_aclset_common(zp, aclp, cr, tx);
3194		ASSERT0(err);
3195		if (zp->z_acl_cached)
3196			zfs_acl_free(zp->z_acl_cached);
3197		zp->z_acl_cached = aclp;
3198		aclp = NULL;
3199	}
3200
3201
3202	if (mask & AT_ATIME) {
3203		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3204		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3205		    &zp->z_atime, sizeof (zp->z_atime));
3206	}
3207
3208	if (mask & AT_MTIME) {
3209		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3210		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3211		    mtime, sizeof (mtime));
3212	}
3213
3214	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3215	if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3216		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3217		    NULL, mtime, sizeof (mtime));
3218		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3219		    &ctime, sizeof (ctime));
3220		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3221		    B_TRUE);
3222	} else if (mask != 0) {
3223		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3224		    &ctime, sizeof (ctime));
3225		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3226		    B_TRUE);
3227		if (attrzp) {
3228			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3229			    SA_ZPL_CTIME(zfsvfs), NULL,
3230			    &ctime, sizeof (ctime));
3231			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3232			    mtime, ctime, B_TRUE);
3233		}
3234	}
3235	/*
3236	 * Do this after setting timestamps to prevent timestamp
3237	 * update from toggling bit
3238	 */
3239
3240	if (xoap && (mask & AT_XVATTR)) {
3241
3242		/*
3243		 * Restore the trimmed-off masks so that the return
3244		 * masks can be set for the caller.
3245		 */
3246
3247		if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3248			XVA_SET_REQ(xvap, XAT_APPENDONLY);
3249		}
3250		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3251			XVA_SET_REQ(xvap, XAT_NOUNLINK);
3252		}
3253		if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3254			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3255		}
3256		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3257			XVA_SET_REQ(xvap, XAT_NODUMP);
3258		}
3259		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3260			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3261		}
3262		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3263			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3264		}
3265
3266		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3267			ASSERT(vp->v_type == VREG);
3268
3269		zfs_xvattr_set(zp, xvap, tx);
3270	}
3271
3272	if (fuid_dirtied)
3273		zfs_fuid_sync(zfsvfs, tx);
3274
3275	if (mask != 0)
3276		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3277
3278	mutex_exit(&zp->z_lock);
3279	if (mask & (AT_UID|AT_GID|AT_MODE))
3280		mutex_exit(&zp->z_acl_lock);
3281
3282	if (attrzp) {
3283		if (mask & (AT_UID|AT_GID|AT_MODE))
3284			mutex_exit(&attrzp->z_acl_lock);
3285		mutex_exit(&attrzp->z_lock);
3286	}
3287out:
3288	if (err == 0 && attrzp) {
3289		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3290		    xattr_count, tx);
3291		ASSERT(err2 == 0);
3292	}
3293
3294	if (attrzp)
3295		VN_RELE(ZTOV(attrzp));
3296
3297	if (aclp)
3298		zfs_acl_free(aclp);
3299
3300	if (fuidp) {
3301		zfs_fuid_info_free(fuidp);
3302		fuidp = NULL;
3303	}
3304
3305	if (err) {
3306		dmu_tx_abort(tx);
3307		if (err == ERESTART)
3308			goto top;
3309	} else {
3310		err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3311		dmu_tx_commit(tx);
3312	}
3313
3314out2:
3315	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3316		zil_commit(zilog, 0);
3317
3318	ZFS_EXIT(zfsvfs);
3319	return (err);
3320}
3321
3322typedef struct zfs_zlock {
3323	krwlock_t	*zl_rwlock;	/* lock we acquired */
3324	znode_t		*zl_znode;	/* znode we held */
3325	struct zfs_zlock *zl_next;	/* next in list */
3326} zfs_zlock_t;
3327
3328/*
3329 * Drop locks and release vnodes that were held by zfs_rename_lock().
3330 */
3331static void
3332zfs_rename_unlock(zfs_zlock_t **zlpp)
3333{
3334	zfs_zlock_t *zl;
3335
3336	while ((zl = *zlpp) != NULL) {
3337		if (zl->zl_znode != NULL)
3338			VN_RELE(ZTOV(zl->zl_znode));
3339		rw_exit(zl->zl_rwlock);
3340		*zlpp = zl->zl_next;
3341		kmem_free(zl, sizeof (*zl));
3342	}
3343}
3344
3345/*
3346 * Search back through the directory tree, using the ".." entries.
3347 * Lock each directory in the chain to prevent concurrent renames.
3348 * Fail any attempt to move a directory into one of its own descendants.
3349 * XXX - z_parent_lock can overlap with map or grow locks
3350 */
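/*
 * For example, the move "/usr/a/b" -> "/usr/a/b/c/d" starts the walk at the
 * target directory c, follows the ".." chain upward, and returns EINVAL as
 * soon as it reaches szp (b), since completing the rename would make b a
 * descendant of itself.
 */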
3351static int
3352zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3353{
3354	zfs_zlock_t	*zl;
3355	znode_t		*zp = tdzp;
3356	uint64_t	rootid = zp->z_zfsvfs->z_root;
3357	uint64_t	oidp = zp->z_id;
3358	krwlock_t	*rwlp = &szp->z_parent_lock;
3359	krw_t		rw = RW_WRITER;
3360
3361	/*
3362	 * First pass write-locks szp and compares to zp->z_id.
3363	 * Later passes read-lock zp and compare to zp->z_parent.
3364	 */
3365	do {
3366		if (!rw_tryenter(rwlp, rw)) {
3367			/*
3368			 * Another thread is renaming in this path.
3369			 * Note that if we are a WRITER, we don't have any
3370			 * parent_locks held yet.
3371			 */
3372			if (rw == RW_READER && zp->z_id > szp->z_id) {
3373				/*
3374				 * Drop our locks and restart
3375				 */
3376				zfs_rename_unlock(&zl);
3377				*zlpp = NULL;
3378				zp = tdzp;
3379				oidp = zp->z_id;
3380				rwlp = &szp->z_parent_lock;
3381				rw = RW_WRITER;
3382				continue;
3383			} else {
3384				/*
3385				 * Wait for other thread to drop its locks
3386				 */
3387				rw_enter(rwlp, rw);
3388			}
3389		}
3390
3391		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3392		zl->zl_rwlock = rwlp;
3393		zl->zl_znode = NULL;
3394		zl->zl_next = *zlpp;
3395		*zlpp = zl;
3396
3397		if (oidp == szp->z_id)		/* We're a descendant of szp */
3398			return (SET_ERROR(EINVAL));
3399
3400		if (oidp == rootid)		/* We've hit the top */
3401			return (0);
3402
3403		if (rw == RW_READER) {		/* i.e. not the first pass */
3404			int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
3405			if (error)
3406				return (error);
3407			zl->zl_znode = zp;
3408		}
3409		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
3410		    &oidp, sizeof (oidp));
3411		rwlp = &zp->z_parent_lock;
3412		rw = RW_READER;
3413
3414	} while (zp->z_id != sdzp->z_id);
3415
3416	return (0);
3417}
3418
3419/*
3420 * Move an entry from the provided source directory to the target
3421 * directory.  Change the entry name as indicated.
3422 *
3423 *	IN:	sdvp	- Source directory containing the "old entry".
3424 *		snm	- Old entry name.
3425 *		tdvp	- Target directory to contain the "new entry".
3426 *		tnm	- New entry name.
3427 *		cr	- credentials of caller.
3428 *		ct	- caller context
3429 *		flags	- case flags
3430 *
3431 *	RETURN:	0 on success, error code on failure.
3432 *
3433 * Timestamps:
3434 *	sdvp,tdvp - ctime|mtime updated
3435 */
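/*
 * Roughly, the function below locks the source and target directory entries
 * in a deadlock-safe order, uses zfs_rename_lock() to make sure a directory
 * is not being moved into one of its own descendants, removes any existing
 * target entry, and then creates the new link and destroys the old one
 * within a single DMU transaction.
 */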
3436/*ARGSUSED*/
3437static int
3438zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
3439    caller_context_t *ct, int flags)
3440{
3441	znode_t		*tdzp, *szp, *tzp;
3442	znode_t		*sdzp = VTOZ(sdvp);
3443	zfsvfs_t	*zfsvfs = sdzp->z_zfsvfs;
3444	zilog_t		*zilog;
3445	vnode_t		*realvp;
3446	zfs_dirlock_t	*sdl, *tdl;
3447	dmu_tx_t	*tx;
3448	zfs_zlock_t	*zl;
3449	int		cmp, serr, terr;
3450	int		error = 0, rm_err = 0;
3451	int		zflg = 0;
3452	boolean_t	waited = B_FALSE;
3453
3454	ZFS_ENTER(zfsvfs);
3455	ZFS_VERIFY_ZP(sdzp);
3456	zilog = zfsvfs->z_log;
3457
3458	/*
3459	 * Make sure we have the real vp for the target directory.
3460	 */
3461	if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3462		tdvp = realvp;
3463
3464	tdzp = VTOZ(tdvp);
3465	ZFS_VERIFY_ZP(tdzp);
3466
3467	/*
3468	 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
3469	 * ctldir appear to have the same v_vfsp.
3470	 */
3471	if (tdzp->z_zfsvfs != zfsvfs || zfsctl_is_node(tdvp)) {
3472		ZFS_EXIT(zfsvfs);
3473		return (SET_ERROR(EXDEV));
3474	}
3475
3476	if (zfsvfs->z_utf8 && u8_validate(tnm,
3477	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3478		ZFS_EXIT(zfsvfs);
3479		return (SET_ERROR(EILSEQ));
3480	}
3481
3482	if (flags & FIGNORECASE)
3483		zflg |= ZCILOOK;
3484
3485top:
3486	szp = NULL;
3487	tzp = NULL;
3488	zl = NULL;
3489
3490	/*
3491	 * This is to prevent the creation of links into attribute space
3492	 * by renaming a linked file into/out of an attribute directory.
3493	 * See the comment in zfs_link() for why this is considered bad.
3494	 */
3495	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3496		ZFS_EXIT(zfsvfs);
3497		return (SET_ERROR(EINVAL));
3498	}
3499
3500	/*
3501	 * Lock source and target directory entries.  To prevent deadlock,
3502	 * a lock ordering must be defined.  We lock the directory with
3503	 * the smallest object id first, or if it's a tie, the one with
3504	 * the lexically first name.
3505	 */
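	/*
	 * For example, if sdzp->z_id < tdzp->z_id the source entry is
	 * locked first (cmp < 0 below); equal object ids mean the two
	 * directories are the same, and the tie is broken by comparing
	 * the names themselves.
	 */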
3506	if (sdzp->z_id < tdzp->z_id) {
3507		cmp = -1;
3508	} else if (sdzp->z_id > tdzp->z_id) {
3509		cmp = 1;
3510	} else {
3511		/*
3512		 * First compare the two name arguments without
3513		 * considering any case folding.
3514		 */
3515		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3516
3517		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3518		ASSERT(error == 0 || !zfsvfs->z_utf8);
3519		if (cmp == 0) {
3520			/*
3521			 * POSIX: "If the old argument and the new argument
3522			 * both refer to links to the same existing file,
3523			 * the rename() function shall return successfully
3524			 * and perform no other action."
3525			 */
3526			ZFS_EXIT(zfsvfs);
3527			return (0);
3528		}
3529		/*
3530		 * If the file system is case-folding, then we may
3531		 * have some more checking to do.  A case-folding file
3532		 * system is either supporting mixed case sensitivity
3533		 * access or is completely case-insensitive.  Note
3534		 * that the file system is always case preserving.
3535		 *
3536		 * In mixed sensitivity mode case sensitive behavior
3537		 * is the default.  FIGNORECASE must be used to
3538		 * explicitly request case insensitive behavior.
3539		 *
3540		 * If the source and target names provided differ only
3541		 * by case (e.g., a request to rename 'tim' to 'Tim'),
3542		 * we will treat this as a special case in the
3543		 * case-insensitive mode: as long as the source name
3544		 * is an exact match, we will allow this to proceed as
3545		 * a name-change request.
3546		 */
3547		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3548		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
3549		    flags & FIGNORECASE)) &&
3550		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3551		    &error) == 0) {
3552			/*
3553			 * Case-preserving rename request; require an exact
3554			 * name match.
3555			 */
3556			zflg |= ZCIEXACT;
3557			zflg &= ~ZCILOOK;
3558		}
3559	}
3560
3561	/*
3562	 * If the source and destination directories are the same, we should
3563	 * grab the z_name_lock of that directory only once.
3564	 */
3565	if (sdzp == tdzp) {
3566		zflg |= ZHAVELOCK;
3567		rw_enter(&sdzp->z_name_lock, RW_READER);
3568	}
3569
3570	if (cmp < 0) {
3571		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3572		    ZEXISTS | zflg, NULL, NULL);
3573		terr = zfs_dirent_lock(&tdl,
3574		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3575	} else {
3576		terr = zfs_dirent_lock(&tdl,
3577		    tdzp, tnm, &tzp, zflg, NULL, NULL);
3578		serr = zfs_dirent_lock(&sdl,
3579		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3580		    NULL, NULL);
3581	}
3582
3583	if (serr) {
3584		/*
3585		 * Source entry invalid or not there.
3586		 */
3587		if (!terr) {
3588			zfs_dirent_unlock(tdl);
3589			if (tzp)
3590				VN_RELE(ZTOV(tzp));
3591		}
3592
3593		if (sdzp == tdzp)
3594			rw_exit(&sdzp->z_name_lock);
3595
3596		if (strcmp(snm, "..") == 0)
3597			serr = SET_ERROR(EINVAL);
3598		ZFS_EXIT(zfsvfs);
3599		return (serr);
3600	}
3601	if (terr) {
3602		zfs_dirent_unlock(sdl);
3603		VN_RELE(ZTOV(szp));
3604
3605		if (sdzp == tdzp)
3606			rw_exit(&sdzp->z_name_lock);
3607
3608		if (strcmp(tnm, "..") == 0)
3609			terr = SET_ERROR(EINVAL);
3610		ZFS_EXIT(zfsvfs);
3611		return (terr);
3612	}
3613
3614	/*
3615	 * Must have write access at the source to remove the old entry
3616	 * and write access at the target to create the new entry.
3617	 * Note that if target and source are the same, this can be
3618	 * done in a single check.
3619	 */
3620
3621	if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3622		goto out;
3623
3624	if (ZTOV(szp)->v_type == VDIR) {
3625		/*
3626		 * Check to make sure rename is valid.
3627		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3628		 */
3629		if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
3630			goto out;
3631	}
3632
3633	/*
3634	 * Does target exist?
3635	 */
3636	if (tzp) {
3637		/*
3638		 * Source and target must be the same type.
3639		 */
3640		if (ZTOV(szp)->v_type == VDIR) {
3641			if (ZTOV(tzp)->v_type != VDIR) {
3642				error = SET_ERROR(ENOTDIR);
3643				goto out;
3644			}
3645		} else {
3646			if (ZTOV(tzp)->v_type == VDIR) {
3647				error = SET_ERROR(EISDIR);
3648				goto out;
3649			}
3650		}
3651		/*
3652		 * POSIX dictates that when the source and target
3653		 * entries refer to the same file object, rename
3654		 * must do nothing and exit without error.
3655		 */
3656		if (szp->z_id == tzp->z_id) {
3657			error = 0;
3658			goto out;
3659		}
3660	}
3661
3662	vnevent_pre_rename_src(ZTOV(szp), sdvp, snm, ct);
3663	if (tzp)
3664		vnevent_pre_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
3665
3666	/*
3667	 * Notify the target directory if it is not the same
3668	 * as the source directory.
3669	 */
3670	if (tdvp != sdvp) {
3671		vnevent_pre_rename_dest_dir(tdvp, ZTOV(szp), tnm, ct);
3672	}
3673
3674	tx = dmu_tx_create(zfsvfs->z_os);
3675	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3676	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3677	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3678	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3679	if (sdzp != tdzp) {
3680		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3681		zfs_sa_upgrade_txholds(tx, tdzp);
3682	}
3683	if (tzp) {
3684		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3685		zfs_sa_upgrade_txholds(tx, tzp);
3686	}
3687
3688	zfs_sa_upgrade_txholds(tx, szp);
3689	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3690	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
3691	if (error) {
3692		if (zl != NULL)
3693			zfs_rename_unlock(&zl);
3694		zfs_dirent_unlock(sdl);
3695		zfs_dirent_unlock(tdl);
3696
3697		if (sdzp == tdzp)
3698			rw_exit(&sdzp->z_name_lock);
3699
3700		VN_RELE(ZTOV(szp));
3701		if (tzp)
3702			VN_RELE(ZTOV(tzp));
3703		if (error == ERESTART) {
3704			waited = B_TRUE;
3705			dmu_tx_wait(tx);
3706			dmu_tx_abort(tx);
3707			goto top;
3708		}
3709		dmu_tx_abort(tx);
3710		ZFS_EXIT(zfsvfs);
3711		return (error);
3712	}
3713
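	/*
	 * If the target name already exists, unlink it first; doing so in
	 * the same transaction as the link creation below keeps the rename
	 * atomic with respect to the on-disk state.
	 */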
3714	if (tzp)	/* Attempt to remove the existing target */
3715		error = rm_err = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3716
3717	if (error == 0) {
3718		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3719		if (error == 0) {
3720			szp->z_pflags |= ZFS_AV_MODIFIED;
3721
3722			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3723			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3724			ASSERT0(error);
3725
3726			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3727			if (error == 0) {
3728				zfs_log_rename(zilog, tx, TX_RENAME |
3729				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3730				    sdl->dl_name, tdzp, tdl->dl_name, szp);
3731
3732				/*
3733				 * Update path information for the target vnode
3734				 */
3735				vn_renamepath(tdvp, ZTOV(szp), tnm,
3736				    strlen(tnm));
3737			} else {
3738				/*
3739				 * At this point, we have successfully created
3740				 * the target name, but have failed to remove
3741				 * the source name.  Since the create was done
3742				 * with the ZRENAMING flag, there are
3743				 * complications; for one, the link count is
3744				 * wrong.  The easiest way to deal with this
3745				 * is to remove the newly created target, and
3746				 * return the original error.  This must
3747				 * succeed; fortunately, it is very unlikely to
3748				 * fail, since we just created it.
3749				 */
3750				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3751				    ZRENAMING, NULL), ==, 0);
3752			}
3753		}
3754	}
3755
3756	dmu_tx_commit(tx);
3757
3758	if (tzp && rm_err == 0)
3759		vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
3760
3761	if (error == 0) {
3762		vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
3763		/* notify the target dir if it is not the same as source dir */
3764		if (tdvp != sdvp)
3765			vnevent_rename_dest_dir(tdvp, ct);
3766	}
3767out:
3768	if (zl != NULL)
3769		zfs_rename_unlock(&zl);
3770
3771	zfs_dirent_unlock(sdl);
3772	zfs_dirent_unlock(tdl);
3773
3774	if (sdzp == tdzp)
3775		rw_exit(&sdzp->z_name_lock);
3776
3777
3778	VN_RELE(ZTOV(szp));
3779	if (tzp)
3780		VN_RELE(ZTOV(tzp));
3781
3782	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3783		zil_commit(zilog, 0);
3784
3785	ZFS_EXIT(zfsvfs);
3786	return (error);
3787}
3788
3789/*
3790 * Insert the indicated symbolic reference entry into the directory.
3791 *
3792 *	IN:	dvp	- Directory to contain new symbolic link.
3793 *		name	- Name of new directory entry (the symlink itself).
 *		link	- Target path that the symlink will reference.
3794 *		vap	- Attributes of new entry.
3795 *		cr	- credentials of caller.
3796 *		ct	- caller context
3797 *		flags	- case flags
3798 *
3799 *	RETURN:	0 on success, error code on failure.
3800 *
3801 * Timestamps:
3802 *	dvp - ctime|mtime updated
3803 */
3804/*ARGSUSED*/
3805static int
3806zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
3807    caller_context_t *ct, int flags)
3808{
3809	znode_t		*zp, *dzp = VTOZ(dvp);
3810	zfs_dirlock_t	*dl;
3811	dmu_tx_t	*tx;
3812	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
3813	zilog_t		*zilog;
3814	uint64_t	len = strlen(link);
3815	int		error;
3816	int		zflg = ZNEW;
3817	zfs_acl_ids_t	acl_ids;
3818	boolean_t	fuid_dirtied;
3819	uint64_t	txtype = TX_SYMLINK;
3820	boolean_t	waited = B_FALSE;
3821
3822	ASSERT(vap->va_type == VLNK);
3823
3824	ZFS_ENTER(zfsvfs);
3825	ZFS_VERIFY_ZP(dzp);
3826	zilog = zfsvfs->z_log;
3827
3828	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3829	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3830		ZFS_EXIT(zfsvfs);
3831		return (SET_ERROR(EILSEQ));
3832	}
3833	if (flags & FIGNORECASE)
3834		zflg |= ZCILOOK;
3835
3836	if (len > MAXPATHLEN) {
3837		ZFS_EXIT(zfsvfs);
3838		return (SET_ERROR(ENAMETOOLONG));
3839	}
3840
3841	if ((error = zfs_acl_ids_create(dzp, 0,
3842	    vap, cr, NULL, &acl_ids)) != 0) {
3843		ZFS_EXIT(zfsvfs);
3844		return (error);
3845	}
3846top:
3847	/*
3848	 * Attempt to lock directory; fail if entry already exists.
3849	 */
3850	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3851	if (error) {
3852		zfs_acl_ids_free(&acl_ids);
3853		ZFS_EXIT(zfsvfs);
3854		return (error);
3855	}
3856
3857	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
3858		zfs_acl_ids_free(&acl_ids);
3859		zfs_dirent_unlock(dl);
3860		ZFS_EXIT(zfsvfs);
3861		return (error);
3862	}
3863
3864	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
3865		zfs_acl_ids_free(&acl_ids);
3866		zfs_dirent_unlock(dl);
3867		ZFS_EXIT(zfsvfs);
3868		return (SET_ERROR(EDQUOT));
3869	}
3870	tx = dmu_tx_create(zfsvfs->z_os);
3871	fuid_dirtied = zfsvfs->z_fuid_dirty;
3872	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3873	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3874	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3875	    ZFS_SA_BASE_ATTR_SIZE + len);
3876	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3877	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3878		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3879		    acl_ids.z_aclp->z_acl_bytes);
3880	}
3881	if (fuid_dirtied)
3882		zfs_fuid_txhold(zfsvfs, tx);
3883	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
3884	if (error) {
3885		zfs_dirent_unlock(dl);
3886		if (error == ERESTART) {
3887			waited = B_TRUE;
3888			dmu_tx_wait(tx);
3889			dmu_tx_abort(tx);
3890			goto top;
3891		}
3892		zfs_acl_ids_free(&acl_ids);
3893		dmu_tx_abort(tx);
3894		ZFS_EXIT(zfsvfs);
3895		return (error);
3896	}
3897
3898	/*
3899	 * Create a new object for the symlink.
3900	 * For version 4 ZPL datasets the symlink will be an SA attribute.
3901	 */
3902	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3903
3904	if (fuid_dirtied)
3905		zfs_fuid_sync(zfsvfs, tx);
3906
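	/*
	 * Store the link target: as the SA_ZPL_SYMLINK attribute when the
	 * new znode uses the SA layout, otherwise through the legacy
	 * zfs_sa_symlink() path.
	 */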
3907	mutex_enter(&zp->z_lock);
3908	if (zp->z_is_sa)
3909		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3910		    link, len, tx);
3911	else
3912		zfs_sa_symlink(zp, link, len, tx);
3913	mutex_exit(&zp->z_lock);
3914
3915	zp->z_size = len;
3916	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3917	    &zp->z_size, sizeof (zp->z_size), tx);
3918	/*
3919	 * Insert the new object into the directory.
3920	 */
3921	(void) zfs_link_create(dl, zp, tx, ZNEW);
3922
3923	if (flags & FIGNORECASE)
3924		txtype |= TX_CI;
3925	zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3926
3927	zfs_acl_ids_free(&acl_ids);
3928
3929	dmu_tx_commit(tx);
3930
3931	zfs_dirent_unlock(dl);
3932
3933	VN_RELE(ZTOV(zp));
3934
3935	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3936		zil_commit(zilog, 0);
3937
3938	ZFS_EXIT(zfsvfs);
3939	return (error);
3940}
3941
3942/*
3943 * Return, in the buffer contained in the provided uio structure,
3944 * the symbolic path referred to by vp.
3945 *
3946 *	IN:	vp	- vnode of symbolic link.
3947 *		uio	- structure to contain the link path.
3948 *		cr	- credentials of caller.
3949 *		ct	- caller context
3950 *
3951 *	OUT:	uio	- structure containing the link path.
3952 *
3953 *	RETURN:	0 on success, error code on failure.
3954 *
3955 * Timestamps:
3956 *	vp - atime updated
3957 */
3958/* ARGSUSED */
3959static int
3960zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
3961{
3962	znode_t		*zp = VTOZ(vp);
3963	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
3964	int		error;
3965
3966	ZFS_ENTER(zfsvfs);
3967	ZFS_VERIFY_ZP(zp);
3968
3969	mutex_enter(&zp->z_lock);
3970	if (zp->z_is_sa)
3971		error = sa_lookup_uio(zp->z_sa_hdl,
3972		    SA_ZPL_SYMLINK(zfsvfs), uio);
3973	else
3974		error = zfs_sa_readlink(zp, uio);
3975	mutex_exit(&zp->z_lock);
3976
3977	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
3978
3979	ZFS_EXIT(zfsvfs);
3980	return (error);
3981}
3982
3983/*
3984 * Insert a new entry into directory tdvp referencing svp.
3985 *
3986 *	IN:	tdvp	- Directory to contain new entry.
3987 *		svp	- vnode of new entry.
3988 *		name	- name of new entry.
3989 *		cr	- credentials of caller.
3990 *		ct	- caller context
3991 *
3992 *	RETURN:	0 on success, error code on failure.
3993 *
3994 * Timestamps:
3995 *	tdvp - ctime|mtime updated
3996 *	 svp - ctime updated
3997 */
3998/* ARGSUSED */
3999static int
4000zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
4001    caller_context_t *ct, int flags)
4002{
4003	znode_t		*dzp = VTOZ(tdvp);
4004	znode_t		*tzp, *szp;
4005	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
4006	zilog_t		*zilog;
4007	zfs_dirlock_t	*dl;
4008	dmu_tx_t	*tx;
4009	vnode_t		*realvp;
4010	int		error;
4011	int		zf = ZNEW;
4012	uint64_t	parent;
4013	uid_t		owner;
4014	boolean_t	waited = B_FALSE;
4015
4016	ASSERT(tdvp->v_type == VDIR);
4017
4018	ZFS_ENTER(zfsvfs);
4019	ZFS_VERIFY_ZP(dzp);
4020	zilog = zfsvfs->z_log;
4021
4022	if (VOP_REALVP(svp, &realvp, ct) == 0)
4023		svp = realvp;
4024
4025	/*
4026	 * POSIX dictates that we return EPERM here.
4027	 * Better choices include ENOTSUP or EISDIR.
4028	 */
4029	if (svp->v_type == VDIR) {
4030		ZFS_EXIT(zfsvfs);
4031		return (SET_ERROR(EPERM));
4032	}
4033
4034	szp = VTOZ(svp);
4035	ZFS_VERIFY_ZP(szp);
4036
4037	/*
4038	 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
4039	 * ctldir appear to have the same v_vfsp.
4040	 */
4041	if (szp->z_zfsvfs != zfsvfs || zfsctl_is_node(svp)) {
4042		ZFS_EXIT(zfsvfs);
4043		return (SET_ERROR(EXDEV));
4044	}
4045
4046	/* Prevent links to .zfs/shares files */
4047
4048	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4049	    &parent, sizeof (uint64_t))) != 0) {
4050		ZFS_EXIT(zfsvfs);
4051		return (error);
4052	}
4053	if (parent == zfsvfs->z_shares_dir) {
4054		ZFS_EXIT(zfsvfs);
4055		return (SET_ERROR(EPERM));
4056	}
4057
4058	if (zfsvfs->z_utf8 && u8_validate(name,
4059	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4060		ZFS_EXIT(zfsvfs);
4061		return (SET_ERROR(EILSEQ));
4062	}
4063	if (flags & FIGNORECASE)
4064		zf |= ZCILOOK;
4065
4066	/*
4067	 * We do not support links between attributes and non-attributes
4068	 * because of the potential security risk of creating links
4069	 * into "normal" file space in order to circumvent restrictions
4070	 * imposed in attribute space.
4071	 */
4072	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4073		ZFS_EXIT(zfsvfs);
4074		return (SET_ERROR(EINVAL));
4075	}
4076
4078	owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
4079	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
4080		ZFS_EXIT(zfsvfs);
4081		return (SET_ERROR(EPERM));
4082	}
4083
4084	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4085		ZFS_EXIT(zfsvfs);
4086		return (error);
4087	}
4088
4089top:
4090	/*
4091	 * Attempt to lock directory; fail if entry already exists.
4092	 */
4093	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4094	if (error) {
4095		ZFS_EXIT(zfsvfs);
4096		return (error);
4097	}
4098
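	/*
	 * The transaction needs an SA hold on the source znode (link count
	 * and ctime), a ZAP hold on the target directory for the new entry,
	 * and upgrade holds in case either znode's SA layout must change.
	 */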
4099	tx = dmu_tx_create(zfsvfs->z_os);
4100	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4101	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4102	zfs_sa_upgrade_txholds(tx, szp);
4103	zfs_sa_upgrade_txholds(tx, dzp);
4104	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
4105	if (error) {
4106		zfs_dirent_unlock(dl);
4107		if (error == ERESTART) {
4108			waited = B_TRUE;
4109			dmu_tx_wait(tx);
4110			dmu_tx_abort(tx);
4111			goto top;
4112		}
4113		dmu_tx_abort(tx);
4114		ZFS_EXIT(zfsvfs);
4115		return (error);
4116	}
4117
4118	error = zfs_link_create(dl, szp, tx, 0);
4119
4120	if (error == 0) {
4121		uint64_t txtype = TX_LINK;
4122		if (flags & FIGNORECASE)
4123			txtype |= TX_CI;
4124		zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4125	}
4126
4127	dmu_tx_commit(tx);
4128
4129	zfs_dirent_unlock(dl);
4130
4131	if (error == 0) {
4132		vnevent_link(svp, ct);
4133	}
4134
4135	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4136		zil_commit(zilog, 0);
4137
4138	ZFS_EXIT(zfsvfs);
4139	return (error);
4140}
4141
4142/*
4143 * zfs_null_putapage() is used when the file system has been forcibly
4144 * unmounted.  It just drops the pages.
4145 */
4146/* ARGSUSED */
4147static int
4148zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4149    size_t *lenp, int flags, cred_t *cr)
4150{
4151	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
4152	return (0);
4153}
4154
4155/*
4156 * Push a page out to disk, klustering if possible.
4157 *
4158 *	IN:	vp	- file to push page to.
4159 *		pp	- page to push.
4160 *		flags	- additional flags.
4161 *		cr	- credentials of caller.
4162 *
4163 *	OUT:	offp	- start of range pushed.
4164 *		lenp	- len of range pushed.
4165 *
4166 *	RETURN:	0 on success, error code on failure.
4167 *
4168 * NOTE: callers must have locked the page to be pushed.  On
4169 * exit, the page (and all other pages in the kluster) must be
4170 * unlocked.
4171 */
4172/* ARGSUSED */
4173static int
4174zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4175    size_t *lenp, int flags, cred_t *cr)
4176{
4177	znode_t		*zp = VTOZ(vp);
4178	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4179	dmu_tx_t	*tx;
4180	u_offset_t	off, koff;
4181	size_t		len, klen;
4182	int		err;
4183
4184	off = pp->p_offset;
4185	len = PAGESIZE;
4186	/*
4187	 * If our blocksize is bigger than the page size, try to kluster
4188	 * multiple pages so that we write a full block (thus avoiding
4189	 * a read-modify-write).
4190	 */
4191	if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
4192		klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
4193		koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
4194		ASSERT(koff <= zp->z_size);
4195		if (koff + klen > zp->z_size)
4196			klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
4197		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
4198	}
4199	ASSERT3U(btop(len), ==, btopr(len));
4200
4201	/*
4202	 * Can't push pages past end-of-file.
4203	 */
4204	if (off >= zp->z_size) {
4205		/* ignore all pages */
4206		err = 0;
4207		goto out;
4208	} else if (off + len > zp->z_size) {
4209		int npages = btopr(zp->z_size - off);
4210		page_t *trunc;
4211
4212		page_list_break(&pp, &trunc, npages);
4213		/* ignore pages past end of file */
4214		if (trunc)
4215			pvn_write_done(trunc, flags);
4216		len = zp->z_size - off;
4217	}
4218
4219	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4220	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4221		err = SET_ERROR(EDQUOT);
4222		goto out;
4223	}
4224	tx = dmu_tx_create(zfsvfs->z_os);
4225	dmu_tx_hold_write(tx, zp->z_id, off, len);
4226
4227	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4228	zfs_sa_upgrade_txholds(tx, zp);
4229	err = dmu_tx_assign(tx, TXG_WAIT);
4230	if (err != 0) {
4231		dmu_tx_abort(tx);
4232		goto out;
4233	}
4234
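	/*
	 * Copy the page data into the DMU: a single page is mapped and
	 * written with dmu_write(); a multi-page kluster is handed to
	 * dmu_write_pages() directly.
	 */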
4235	if (zp->z_blksz <= PAGESIZE) {
4236		caddr_t va = zfs_map_page(pp, S_READ);
4237		ASSERT3U(len, <=, PAGESIZE);
4238		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
4239		zfs_unmap_page(pp, va);
4240	} else {
4241		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
4242	}
4243
4244	if (err == 0) {
4245		uint64_t mtime[2], ctime[2];
4246		sa_bulk_attr_t bulk[3];
4247		int count = 0;
4248
4249		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4250		    &mtime, 16);
4251		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4252		    &ctime, 16);
4253		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4254		    &zp->z_pflags, 8);
4255		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4256		    B_TRUE);
4257		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4258	}
4259	dmu_tx_commit(tx);
4260
4261out:
4262	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
4263	if (offp)
4264		*offp = off;
4265	if (lenp)
4266		*lenp = len;
4267
4268	return (err);
4269}
4270
4271/*
4272 * Copy the indicated portion of the file from its cached pages back into
4273 * the file.  The pages are kept on a page list attached to the file's vnode.
4274 *
4275 *	IN:	vp	- vnode of file to push page data to.
4276 *		off	- position in file to put data.
4277 *		len	- amount of data to write.
4278 *		flags	- flags to control the operation.
4279 *		cr	- credentials of caller.
4280 *		ct	- caller context.
4281 *
4282 *	RETURN:	0 on success, error code on failure.
4283 *
4284 * Timestamps:
4285 *	vp - ctime|mtime updated
4286 */
4287/*ARGSUSED*/
4288static int
4289zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4290    caller_context_t *ct)
4291{
4292	znode_t		*zp = VTOZ(vp);
4293	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4294	page_t		*pp;
4295	size_t		io_len;
4296	u_offset_t	io_off;
4297	uint_t		blksz;
4298	rl_t		*rl;
4299	int		error = 0;
4300
4301	ZFS_ENTER(zfsvfs);
4302	ZFS_VERIFY_ZP(zp);
4303
4304	/*
4305	 * There's nothing to do if no data is cached.
4306	 */
4307	if (!vn_has_cached_data(vp)) {
4308		ZFS_EXIT(zfsvfs);
4309		return (0);
4310	}
4311
4312	/*
4313	 * Align this request to the file block size in case we kluster.
4314	 * XXX - this can result in pretty aggressive locking, which can
4315	 * impact simultaneous read/write access.  One option might be
4316	 * to break up long requests (len == 0) into block-by-block
4317	 * operations to get narrower locking.
4318	 */
4319	blksz = zp->z_blksz;
4320	if (ISP2(blksz))
4321		io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
4322	else
4323		io_off = 0;
4324	if (len > 0 && ISP2(blksz))
4325		io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
4326	else
4327		io_len = 0;
4328
4329	if (io_len == 0) {
4330		/*
4331		 * Search the entire vp list for pages >= io_off.
4332		 */
4333		rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
4334		error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
4335		goto out;
4336	}
4337	rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
4338
4339	if (off > zp->z_size) {
4340		/* past end of file */
4341		zfs_range_unlock(rl);
4342		ZFS_EXIT(zfsvfs);
4343		return (0);
4344	}
4345
4346	len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
4347
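	/*
	 * Walk the aligned range page by page.  Invalidating or synchronous
	 * requests block on the page lock; asynchronous requests use the
	 * non-blocking lookup.  zfs_putapage() resets io_off/io_len to the
	 * kluster it actually pushed, so the loop steps past it.
	 */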
4348	for (off = io_off; io_off < off + len; io_off += io_len) {
4349		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
4350			pp = page_lookup(vp, io_off,
4351			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
4352		} else {
4353			pp = page_lookup_nowait(vp, io_off,
4354			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
4355		}
4356
4357		if (pp != NULL && pvn_getdirty(pp, flags)) {
4358			int err;
4359
4360			/*
4361			 * Found a dirty page to push
4362			 */
4363			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
4364			if (err)
4365				error = err;
4366		} else {
4367			io_len = PAGESIZE;
4368		}
4369	}
4370out:
4371	zfs_range_unlock(rl);
4372	if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4373		zil_commit(zfsvfs->z_log, zp->z_id);
4374	ZFS_EXIT(zfsvfs);
4375	return (error);
4376}
4377
4378/*ARGSUSED*/
4379void
4380zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4381{
4382	znode_t	*zp = VTOZ(vp);
4383	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4384	int error;
4385
4386	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4387	if (zp->z_sa_hdl == NULL) {
4388		/*
4389		 * The fs has been unmounted, or we did a
4390		 * suspend/resume and this file no longer exists.
4391		 */
4392		if (vn_has_cached_data(vp)) {
4393			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,
4394			    B_INVAL, cr);
4395		}
4396
4397		mutex_enter(&zp->z_lock);
4398		mutex_enter(&vp->v_lock);
4399		ASSERT(vp->v_count == 1);
4400		vp->v_count = 0;
4401		mutex_exit(&vp->v_lock);
4402		mutex_exit(&zp->z_lock);
4403		rw_exit(&zfsvfs->z_teardown_inactive_lock);
4404		zfs_znode_free(zp);
4405		return;
4406	}
4407
4408	/*
4409	 * Attempt to push any data in the page cache.  If this fails
4410	 * we will get kicked out later in zfs_zinactive().
4411	 */
4412	if (vn_has_cached_data(vp)) {
4413		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,
4414		    cr);
4415	}
4416
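	/*
	 * If the in-core atime is dirty and the file has not been unlinked,
	 * write the cached atime back to the znode's SA in its own
	 * transaction.
	 */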
4417	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4418		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4419
4420		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4421		zfs_sa_upgrade_txholds(tx, zp);
4422		error = dmu_tx_assign(tx, TXG_WAIT);
4423		if (error) {
4424			dmu_tx_abort(tx);
4425		} else {
4426			mutex_enter(&zp->z_lock);
4427			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4428			    (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4429			zp->z_atime_dirty = 0;
4430			mutex_exit(&zp->z_lock);
4431			dmu_tx_commit(tx);
4432		}
4433	}
4434
4435	zfs_zinactive(zp);
4436	rw_exit(&zfsvfs->z_teardown_inactive_lock);
4437}
4438
4439/*
4440 * Bounds-check the seek operation.
4441 *
4442 *	IN:	vp	- vnode seeking within
4443 *		ooff	- old file offset
4444 *		noffp	- pointer to new file offset
4445 *		ct	- caller context
4446 *
4447 *	RETURN:	0 on success, EINVAL if new offset invalid.
4448 */
4449/* ARGSUSED */
4450static int
4451zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
4452    caller_context_t *ct)
4453{
4454	if (vp->v_type == VDIR)
4455		return (0);
4456	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4457}
4458
4459/*
4460 * Pre-filter the generic locking function to trap attempts to place
4461 * a mandatory lock on a memory mapped file.
4462 */
4463static int
4464zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4465    flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
4466{
4467	znode_t *zp = VTOZ(vp);
4468	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4469
4470	ZFS_ENTER(zfsvfs);
4471	ZFS_VERIFY_ZP(zp);
4472
4473	/*
4474	 * We are following the UFS semantics with respect to mapcnt
4475	 * here: If we see that the file is mapped already, then we will
4476	 * return an error, but we don't worry about races between this
4477	 * function and zfs_map().
4478	 */
4479	if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4480		ZFS_EXIT(zfsvfs);
4481		return (SET_ERROR(EAGAIN));
4482	}
4483	ZFS_EXIT(zfsvfs);
4484	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4485}
4486
4487/*
4488 * If we can't find a page in the cache, we will create a new page
4489 * and fill it with file data.  For efficiency, we may try to fill
4490 * multiple pages at once (klustering) to satisfy the supplied page
4491 * list.  Note that the pages to be filled are held with an exclusive
4492 * lock to prevent access by other threads while they are being filled.
4493 */
4494static int
4495zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4496    caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4497{
4498	znode_t *zp = VTOZ(vp);
4499	page_t *pp, *cur_pp;
4500	objset_t *os = zp->z_zfsvfs->z_os;
4501	u_offset_t io_off, total;
4502	size_t io_len;
4503	int err;
4504
4505	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4506		/*
4507		 * We only have a single page, don't bother klustering
4508		 */
4509		io_off = off;
4510		io_len = PAGESIZE;
4511		pp = page_create_va(vp, io_off, io_len,
4512		    PG_EXCL | PG_WAIT, seg, addr);
4513	} else {
4514		/*
4515		 * Try to find enough pages to fill the page list
4516		 */
4517		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4518		    &io_len, off, plsz, 0);
4519	}
4520	if (pp == NULL) {
4521		/*
4522		 * The page already exists, nothing to do here.
4523		 */
4524		*pl = NULL;
4525		return (0);
4526	}
4527
4528	/*
4529	 * Fill the pages in the kluster.
4530	 */
4531	cur_pp = pp;
4532	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4533		caddr_t va;
4534
4535		ASSERT3U(io_off, ==, cur_pp->p_offset);
4536		va = zfs_map_page(cur_pp, S_WRITE);
4537		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4538		    DMU_READ_PREFETCH);
4539		zfs_unmap_page(cur_pp, va);
4540		if (err) {
4541			/* On error, toss the entire kluster */
4542			pvn_read_done(pp, B_ERROR);
4543			/* convert checksum errors into IO errors */
4544			if (err == ECKSUM)
4545				err = SET_ERROR(EIO);
4546			return (err);
4547		}
4548		cur_pp = cur_pp->p_next;
4549	}
4550
4551	/*
4552	 * Fill in the page list array from the kluster starting
4553	 * from the desired offset `off'.
4554	 * NOTE: the page list will always be null-terminated.
4555	 */
4556	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4557	ASSERT(pl == NULL || (*pl)->p_offset == off);
4558
4559	return (0);
4560}
4561
4562/*
4563 * Return pointers to the pages for the file region [off, off + len]
4564 * in the pl array.  If plsz is greater than len, this function may
4565 * also return page pointers from after the specified region
4566 * (i.e. the region [off, off + plsz]).  These additional pages are
4567 * only returned if they are already in the cache, or were created as
4568 * part of a klustered read.
4569 *
4570 *	IN:	vp	- vnode of file to get data from.
4571 *		off	- position in file to get data from.
4572 *		len	- amount of data to retrieve.
4573 *		plsz	- length of provided page list.
4574 *		seg	- segment to obtain pages for.
4575 *		addr	- virtual address of fault.
4576 *		rw	- mode of created pages.
4577 *		cr	- credentials of caller.
4578 *		ct	- caller context.
4579 *
4580 *	OUT:	protp	- protection mode of created pages.
4581 *		pl	- list of pages created.
4582 *
4583 *	RETURN:	0 on success, error code on failure.
4584 *
4585 * Timestamps:
4586 *	vp - atime updated
4587 */
4588/* ARGSUSED */
4589static int
4590zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4591    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4592    enum seg_rw rw, cred_t *cr, caller_context_t *ct)
4593{
4594	znode_t		*zp = VTOZ(vp);
4595	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4596	page_t		**pl0 = pl;
4597	int		err = 0;
4598
4599	/* we do our own caching, faultahead is unnecessary */
4600	if (pl == NULL)
4601		return (0);
4602	else if (len > plsz)
4603		len = plsz;
4604	else
4605		len = P2ROUNDUP(len, PAGESIZE);
4606	ASSERT(plsz >= len);
4607
4608	ZFS_ENTER(zfsvfs);
4609	ZFS_VERIFY_ZP(zp);
4610
4611	if (protp)
4612		*protp = PROT_ALL;
4613
4614	/*
4615	 * Loop through the requested range [off, off + len) looking
4616	 * for pages.  If we don't find a page, we will need to create
4617	 * a new page and fill it with data from the file.
4618	 */
4619	while (len > 0) {
4620		if (*pl = page_lookup(vp, off, SE_SHARED))
4621			*(pl+1) = NULL;
4622		else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4623			goto out;
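		/*
		 * Step over the pages just placed in the list: either the
		 * single page found in the cache or the kluster filled in
		 * by zfs_fillpage().
		 */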
4624		while (*pl) {
4625			ASSERT3U((*pl)->p_offset, ==, off);
4626			off += PAGESIZE;
4627			addr += PAGESIZE;
4628			if (len > 0) {
4629				ASSERT3U(len, >=, PAGESIZE);
4630				len -= PAGESIZE;
4631			}
4632			ASSERT3U(plsz, >=, PAGESIZE);
4633			plsz -= PAGESIZE;
4634			pl++;
4635		}
4636	}
4637
4638	/*
4639	 * Fill out the page array with any pages already in the cache.
4640	 */
4641	while (plsz > 0 &&
4642	    (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4643			off += PAGESIZE;
4644			plsz -= PAGESIZE;
4645	}
4646out:
4647	if (err) {
4648		/*
4649		 * Release any pages we have previously locked.
4650		 */
4651		while (pl > pl0)
4652			page_unlock(*--pl);
4653	} else {
4654		ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4655	}
4656
4657	*pl = NULL;
4658
4659	ZFS_EXIT(zfsvfs);
4660	return (err);
4661}
4662
4663/*
4664 * Request a memory map for a section of a file.  This code interacts
4665 * with common code and the VM system as follows:
4666 *
4667 * - common code calls mmap(), which ends up in smmap_common()
4668 * - this calls VOP_MAP(), which takes you into (say) zfs
4669 * - zfs_map() calls as_map(), passing segvn_create() as the callback
4670 * - segvn_create() creates the new segment and calls VOP_ADDMAP()
4671 * - zfs_addmap() updates z_mapcnt
4672 */
4673/*ARGSUSED*/
4674static int
4675zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4676    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4677    caller_context_t *ct)
4678{
4679	znode_t *zp = VTOZ(vp);
4680	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4681	segvn_crargs_t	vn_a;
4682	int		error;
4683
4684	ZFS_ENTER(zfsvfs);
4685	ZFS_VERIFY_ZP(zp);
4686
4687	if ((prot & PROT_WRITE) && (zp->z_pflags &
4688	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4689		ZFS_EXIT(zfsvfs);
4690		return (SET_ERROR(EPERM));
4691	}
4692
4693	if ((prot & (PROT_READ | PROT_EXEC)) &&
4694	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4695		ZFS_EXIT(zfsvfs);
4696		return (SET_ERROR(EACCES));
4697	}
4698
4699	if (vp->v_flag & VNOMAP) {
4700		ZFS_EXIT(zfsvfs);
4701		return (SET_ERROR(ENOSYS));
4702	}
4703
4704	if (off < 0 || len > MAXOFFSET_T - off) {
4705		ZFS_EXIT(zfsvfs);
4706		return (SET_ERROR(ENXIO));
4707	}
4708
4709	if (vp->v_type != VREG) {
4710		ZFS_EXIT(zfsvfs);
4711		return (SET_ERROR(ENODEV));
4712	}
4713
4714	/*
4715	 * If file is locked, disallow mapping.
4716	 */
4717	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4718		ZFS_EXIT(zfsvfs);
4719		return (SET_ERROR(EAGAIN));
4720	}
4721
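	/*
	 * Choose a mapping address within the process address space, then
	 * create a segvn segment backed by this vnode.
	 */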
4722	as_rangelock(as);
4723	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4724	if (error != 0) {
4725		as_rangeunlock(as);
4726		ZFS_EXIT(zfsvfs);
4727		return (error);
4728	}
4729
4730	vn_a.vp = vp;
4731	vn_a.offset = (u_offset_t)off;
4732	vn_a.type = flags & MAP_TYPE;
4733	vn_a.prot = prot;
4734	vn_a.maxprot = maxprot;
4735	vn_a.cred = cr;
4736	vn_a.amp = NULL;
4737	vn_a.flags = flags & ~MAP_TYPE;
4738	vn_a.szc = 0;
4739	vn_a.lgrp_mem_policy_flags = 0;
4740
4741	error = as_map(as, *addrp, len, segvn_create, &vn_a);
4742
4743	as_rangeunlock(as);
4744	ZFS_EXIT(zfsvfs);
4745	return (error);
4746}
4747
4748/* ARGSUSED */
4749static int
4750zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4751    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4752    caller_context_t *ct)
4753{
4754	uint64_t pages = btopr(len);
4755
4756	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4757	return (0);
4758}
4759
4760/*
4761 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4762 * more accurate mtime for the associated file.  Since we don't have a way of
4763 * detecting when the data was actually modified, we have to resort to
4764 * heuristics.  If an explicit msync() is done, then we mark the mtime when the
4765 * last page is pushed.  The problem occurs when the msync() call is omitted,
4766 * which is by far the most common case:
4767 *
4768 *	open()
4769 *	mmap()
4770 *	<modify memory>
4771 *	munmap()
4772 *	close()
4773 *	<time lapse>
4774 *	putpage() via fsflush
4775 *
4776 * If we wait until fsflush to come along, we can have a modification time that
4777 * is some arbitrary point in the future.  In order to prevent this in the
4778 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
4779 * torn down.
4780 */
4781/* ARGSUSED */
4782static int
4783zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4784    size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
4785    caller_context_t *ct)
4786{
4787	uint64_t pages = btopr(len);
4788
4789	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
4790	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
4791
4792	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
4793	    vn_has_cached_data(vp))
4794		(void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
4795
4796	return (0);
4797}
4798
4799/*
4800 * Free or allocate space in a file.  Currently, this function only
4801 * supports the `F_FREESP' command.  However, this command is somewhat
4802 * misnamed, as its functionality includes the ability to allocate as
4803 * well as free space.
4804 *
4805 *	IN:	vp	- vnode of file to free data in.
4806 *		cmd	- action to take (only F_FREESP supported).
4807 *		bfp	- section of file to free/alloc.
4808 *		flag	- current file open mode flags.
4809 *		offset	- current file offset.
4810 *		cr	- credentials of caller [UNUSED].
4811 *		ct	- caller context.
4812 *
4813 *	RETURN:	0 on success, error code on failure.
4814 *
4815 * Timestamps:
4816 *	vp - ctime|mtime updated
4817 */
4818/* ARGSUSED */
4819static int
4820zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
4821    offset_t offset, cred_t *cr, caller_context_t *ct)
4822{
4823	znode_t		*zp = VTOZ(vp);
4824	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4825	uint64_t	off, len;
4826	int		error;
4827
4828	ZFS_ENTER(zfsvfs);
4829	ZFS_VERIFY_ZP(zp);
4830
4831	if (cmd != F_FREESP) {
4832		ZFS_EXIT(zfsvfs);
4833		return (SET_ERROR(EINVAL));
4834	}
4835
4836	/*
4837	 * In the case that vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots),
4838	 * our callers might not be able to properly detect that we are read-only,
4839	 * so check it explicitly here.
4840	 */
4841	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
4842		ZFS_EXIT(zfsvfs);
4843		return (SET_ERROR(EROFS));
4844	}
4845
4846	if (error = convoff(vp, bfp, 0, offset)) {
4847		ZFS_EXIT(zfsvfs);
4848		return (error);
4849	}
4850
4851	if (bfp->l_len < 0) {
4852		ZFS_EXIT(zfsvfs);
4853		return (SET_ERROR(EINVAL));
4854	}
4855
4856	off = bfp->l_start;
4857	len = bfp->l_len; /* 0 means from off to end of file */
4858
4859	error = zfs_freesp(zp, off, len, flag, TRUE);
4860
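	/*
	 * A zero offset and length truncates the file to zero length, so
	 * post a truncate event for any watchers.
	 */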
4861	if (error == 0 && off == 0 && len == 0)
4862		vnevent_truncate(ZTOV(zp), ct);
4863
4864	ZFS_EXIT(zfsvfs);
4865	return (error);
4866}
4867
4868/*ARGSUSED*/
4869static int
4870zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
4871{
4872	znode_t		*zp = VTOZ(vp);
4873	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4874	uint32_t	gen;
4875	uint64_t	gen64;
4876	uint64_t	object = zp->z_id;
4877	zfid_short_t	*zfid;
4878	int		size, i, error;
4879
4880	ZFS_ENTER(zfsvfs);
4881	ZFS_VERIFY_ZP(zp);
4882
4883	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4884	    &gen64, sizeof (uint64_t))) != 0) {
4885		ZFS_EXIT(zfsvfs);
4886		return (error);
4887	}
4888
4889	gen = (uint32_t)gen64;
4890
4891	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
4892	if (fidp->fid_len < size) {
4893		fidp->fid_len = size;
4894		ZFS_EXIT(zfsvfs);
4895		return (SET_ERROR(ENOSPC));
4896	}
4897
4898	zfid = (zfid_short_t *)fidp;
4899
4900	zfid->zf_len = size;
4901
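	/* Encode the object number one byte at a time, least significant first. */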
4902	for (i = 0; i < sizeof (zfid->zf_object); i++)
4903		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4904
4905	/* Must have a non-zero generation number to distinguish from .zfs */
4906	if (gen == 0)
4907		gen = 1;
4908	for (i = 0; i < sizeof (zfid->zf_gen); i++)
4909		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4910
4911	if (size == LONG_FID_LEN) {
4912		uint64_t	objsetid = dmu_objset_id(zfsvfs->z_os);
4913		zfid_long_t	*zlfid;
4914
4915		zlfid = (zfid_long_t *)fidp;
4916
4917		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4918			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4919
4920		/* XXX - this should be the generation number for the objset */
4921		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4922			zlfid->zf_setgen[i] = 0;
4923	}
4924
4925	ZFS_EXIT(zfsvfs);
4926	return (0);
4927}
4928
4929static int
4930zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4931    caller_context_t *ct)
4932{
4933	znode_t		*zp, *xzp;
4934	zfsvfs_t	*zfsvfs;
4935	zfs_dirlock_t	*dl;
4936	int		error;
4937
4938	switch (cmd) {
4939	case _PC_LINK_MAX:
4940		*valp = ULONG_MAX;
4941		return (0);
4942
4943	case _PC_FILESIZEBITS:
4944		*valp = 64;
4945		return (0);
4946
4947	case _PC_XATTR_EXISTS:
4948		zp = VTOZ(vp);
4949		zfsvfs = zp->z_zfsvfs;
4950		ZFS_ENTER(zfsvfs);
4951		ZFS_VERIFY_ZP(zp);
4952		*valp = 0;
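		/*
		 * Look up the hidden extended attribute directory; the file
		 * has xattrs only if that directory exists and is non-empty.
		 */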
4953		error = zfs_dirent_lock(&dl, zp, "", &xzp,
4954		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
4955		if (error == 0) {
4956			zfs_dirent_unlock(dl);
4957			if (!zfs_dirempty(xzp))
4958				*valp = 1;
4959			VN_RELE(ZTOV(xzp));
4960		} else if (error == ENOENT) {
4961			/*
4962			 * If there aren't extended attributes, it's the
4963			 * same as having zero of them.
4964			 */
4965			error = 0;
4966		}
4967		ZFS_EXIT(zfsvfs);
4968		return (error);
4969
4970	case _PC_SATTR_ENABLED:
4971	case _PC_SATTR_EXISTS:
4972		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
4973		    (vp->v_type == VREG || vp->v_type == VDIR);
4974		return (0);
4975
4976	case _PC_ACCESS_FILTERING:
4977		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
4978		    vp->v_type == VDIR;
4979		return (0);
4980
4981	case _PC_ACL_ENABLED:
4982		*valp = _ACL_ACE_ENABLED;
4983		return (0);
4984
4985	case _PC_MIN_HOLE_SIZE:
4986		*valp = (ulong_t)SPA_MINBLOCKSIZE;
4987		return (0);
4988
4989	case _PC_TIMESTAMP_RESOLUTION:
4990		/* nanosecond timestamp resolution */
4991		*valp = 1L;
4992		return (0);
4993
4994	default:
4995		return (fs_pathconf(vp, cmd, valp, cr, ct));
4996	}
4997}
4998
4999/*ARGSUSED*/
5000static int
5001zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5002    caller_context_t *ct)
5003{
5004	znode_t *zp = VTOZ(vp);
5005	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5006	int error;
5007	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5008
5009	ZFS_ENTER(zfsvfs);
5010	ZFS_VERIFY_ZP(zp);
5011	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
5012	ZFS_EXIT(zfsvfs);
5013
5014	return (error);
5015}
5016
5017/*ARGSUSED*/
5018static int
5019zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5020    caller_context_t *ct)
5021{
5022	znode_t *zp = VTOZ(vp);
5023	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5024	int error;
5025	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5026	zilog_t	*zilog = zfsvfs->z_log;
5027
5028	ZFS_ENTER(zfsvfs);
5029	ZFS_VERIFY_ZP(zp);
5030
5031	error = zfs_setacl(zp, vsecp, skipaclchk, cr);
5032
5033	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
5034		zil_commit(zilog, 0);
5035
5036	ZFS_EXIT(zfsvfs);
5037	return (error);
5038}
5039
5040/*
5041 * The smallest read for which we will consider loaning out an arcbuf.
5042 * This must be a power of 2.
5043 */
5044int zcr_blksz_min = (1 << 10);	/* 1K */
5045/*
5046 * If zcr_blksz_max is less than the file block size, allow loaning out
5047 * an arcbuf for a partial block read.  This must be a power of 2.
5048 */
5049int zcr_blksz_max = (1 << 17);	/* 128K */
5050
5051/*ARGSUSED*/
5052static int
5053zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
5054    caller_context_t *ct)
5055{
5056	znode_t	*zp = VTOZ(vp);
5057	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5058	int max_blksz = zfsvfs->z_max_blksz;
5059	uio_t *uio = &xuio->xu_uio;
5060	ssize_t size = uio->uio_resid;
5061	offset_t offset = uio->uio_loffset;
5062	int blksz;
5063	int fullblk, i;
5064	arc_buf_t *abuf;
5065	ssize_t maxsize;
5066	int preamble, postamble;
5067
5068	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
5069		return (SET_ERROR(EINVAL));
5070
5071	ZFS_ENTER(zfsvfs);
5072	ZFS_VERIFY_ZP(zp);
5073	switch (ioflag) {
5074	case UIO_WRITE:
5075		/*
5076		 * Loan out an arc_buf for write if the write size is bigger than
5077		 * max_blksz, and the file's block size is also max_blksz.
5078		 */
5079		blksz = max_blksz;
5080		if (size < blksz || zp->z_blksz != blksz) {
5081			ZFS_EXIT(zfsvfs);
5082			return (SET_ERROR(EINVAL));
5083		}
5084		/*
5085		 * Caller requests buffers for write before knowing where the
5086		 * write offset might be (e.g. NFS TCP write).
5087		 */
5088		if (offset == -1) {
5089			preamble = 0;
5090		} else {
5091			preamble = P2PHASE(offset, blksz);
5092			if (preamble) {
5093				preamble = blksz - preamble;
5094				size -= preamble;
5095			}
5096		}
5097
5098		postamble = P2PHASE(size, blksz);
5099		size -= postamble;
5100
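		/*
		 * One iovec per full block, plus one each for a partial
		 * leading (preamble) and trailing (postamble) block.
		 */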
5101		fullblk = size / blksz;
5102		(void) dmu_xuio_init(xuio,
5103		    (preamble != 0) + fullblk + (postamble != 0));
5104		DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
5105		    int, postamble, int,
5106		    (preamble != 0) + fullblk + (postamble != 0));
5107
5108		/*
5109		 * Have to fix iov base/len for partial buffers.  They
5110		 * currently represent full arc_buf's.
5111		 */
5112		if (preamble) {
5113			/* data begins in the middle of the arc_buf */
5114			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5115			    blksz);
5116			ASSERT(abuf);
5117			(void) dmu_xuio_add(xuio, abuf,
5118			    blksz - preamble, preamble);
5119		}
5120
5121		for (i = 0; i < fullblk; i++) {
5122			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5123			    blksz);
5124			ASSERT(abuf);
5125			(void) dmu_xuio_add(xuio, abuf, 0, blksz);
5126		}
5127
5128		if (postamble) {
5129			/* data ends in the middle of the arc_buf */
5130			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5131			    blksz);
5132			ASSERT(abuf);
5133			(void) dmu_xuio_add(xuio, abuf, 0, postamble);
5134		}
5135		break;
5136	case UIO_READ:
5137		/*
5138		 * Loan out an arc_buf for read if the read size is larger than
5139		 * the current file block size.  Block alignment is not
5140		 * considered.  A partial arc_buf will be loaned out for the read.
5141		 */
5142		blksz = zp->z_blksz;
5143		if (blksz < zcr_blksz_min)
5144			blksz = zcr_blksz_min;
5145		if (blksz > zcr_blksz_max)
5146			blksz = zcr_blksz_max;
5147		/* avoid potential complexity of dealing with it */
5148		if (blksz > max_blksz) {
5149			ZFS_EXIT(zfsvfs);
5150			return (SET_ERROR(EINVAL));
5151		}
5152
5153		maxsize = zp->z_size - uio->uio_loffset;
5154		if (size > maxsize)
5155			size = maxsize;
5156
5157		if (size < blksz || vn_has_cached_data(vp)) {
5158			ZFS_EXIT(zfsvfs);
5159			return (SET_ERROR(EINVAL));
5160		}
5161		break;
5162	default:
5163		ZFS_EXIT(zfsvfs);
5164		return (SET_ERROR(EINVAL));
5165	}
5166
5167	uio->uio_extflg = UIO_XUIO;
5168	XUIO_XUZC_RW(xuio) = ioflag;
5169	ZFS_EXIT(zfsvfs);
5170	return (0);
5171}
5172
5173/*ARGSUSED*/
5174static int
5175zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
5176{
5177	int i;
5178	arc_buf_t *abuf;
5179	int ioflag = XUIO_XUZC_RW(xuio);
5180
5181	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5182
5183	i = dmu_xuio_cnt(xuio);
5184	while (i-- > 0) {
5185		abuf = dmu_xuio_arcbuf(xuio, i);
5186		/*
5187		 * if abuf == NULL, it must be a write buffer
5188		 * that has been returned in zfs_write().
5189		 */
5190		if (abuf)
5191			dmu_return_arcbuf(abuf);
5192		ASSERT(abuf || ioflag == UIO_WRITE);
5193	}
5194
5195	dmu_xuio_fini(xuio);
5196	return (0);
5197}
5198
5199/*
5200 * Predeclare these here so that the compiler treats them as "old style"
5201 * function declarations that take unspecified arguments; this way we
5202 * won't get type mismatch errors in the initializations that follow.
5204 */
5205static int zfs_inval();
5206static int zfs_isdir();
5207
5208static int
5209zfs_inval()
5210{
5211	return (SET_ERROR(EINVAL));
5212}
5213
5214static int
5215zfs_isdir()
5216{
5217	return (SET_ERROR(EISDIR));
5218}

5219/*
5220 * Directory vnode operations template
5221 */
5222vnodeops_t *zfs_dvnodeops;
5223const fs_operation_def_t zfs_dvnodeops_template[] = {
5224	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5225	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5226	VOPNAME_READ,		{ .error = zfs_isdir },
5227	VOPNAME_WRITE,		{ .error = zfs_isdir },
5228	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5229	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5230	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5231	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5232	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5233	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5234	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5235	VOPNAME_LINK,		{ .vop_link = zfs_link },
5236	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5237	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
5238	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5239	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5240	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
5241	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5242	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5243	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5244	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5245	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5246	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5247	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5248	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5249	NULL,			NULL
5250};
5251
5252/*
5253 * Regular file vnode operations template
5254 */
5255vnodeops_t *zfs_fvnodeops;
5256const fs_operation_def_t zfs_fvnodeops_template[] = {
5257	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5258	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5259	VOPNAME_READ,		{ .vop_read = zfs_read },
5260	VOPNAME_WRITE,		{ .vop_write = zfs_write },
5261	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5262	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5263	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5264	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5265	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5266	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5267	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5268	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5269	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5270	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5271	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
5272	VOPNAME_SPACE,		{ .vop_space = zfs_space },
5273	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
5274	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
5275	VOPNAME_MAP,		{ .vop_map = zfs_map },
5276	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
5277	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
5278	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5279	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5280	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5281	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5282	VOPNAME_REQZCBUF,	{ .vop_reqzcbuf = zfs_reqzcbuf },
5283	VOPNAME_RETZCBUF,	{ .vop_retzcbuf = zfs_retzcbuf },
5284	NULL,			NULL
5285};
5286
5287/*
5288 * Symbolic link vnode operations template
5289 */
5290vnodeops_t *zfs_symvnodeops;
5291const fs_operation_def_t zfs_symvnodeops_template[] = {
5292	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5293	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5294	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5295	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5296	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
5297	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5298	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5299	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5300	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5301	NULL,			NULL
5302};
5303
5304/*
5305 * Special share hidden files vnode operations template
5306 */
5307vnodeops_t *zfs_sharevnodeops;
5308const fs_operation_def_t zfs_sharevnodeops_template[] = {
5309	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5310	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5311	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5312	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5313	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5314	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5315	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5316	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5317	NULL,			NULL
5318};
5319
5320/*
5321 * Extended attribute directory vnode operations template
5322 *
5323 * This template is identical to the directory vnodes
5324 * operation template except for restricted operations:
5325 *	VOP_MKDIR()
5326 *	VOP_SYMLINK()
5327 *
5328 * Note that there are other restrictions embedded in:
5329 *	zfs_create()	- restrict type to VREG
5330 *	zfs_link()	- no links into/out of attribute space
5331 *	zfs_rename()	- no moves into/out of attribute space
5332 */
5333vnodeops_t *zfs_xdvnodeops;
5334const fs_operation_def_t zfs_xdvnodeops_template[] = {
5335	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5336	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5337	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5338	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5339	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5340	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5341	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5342	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5343	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5344	VOPNAME_LINK,		{ .vop_link = zfs_link },
5345	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5346	VOPNAME_MKDIR,		{ .error = zfs_inval },
5347	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5348	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5349	VOPNAME_SYMLINK,	{ .error = zfs_inval },
5350	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5351	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5352	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5353	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5354	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5355	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5356	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5357	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5358	NULL,			NULL
5359};
5360
5361/*
5362 * Error vnode operations template
5363 */
5364vnodeops_t *zfs_evnodeops;
5365const fs_operation_def_t zfs_evnodeops_template[] = {
5366	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5367	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5368	NULL,			NULL
5369};
5370