/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smbfs_vnops.c,v 1.128.36.1 2005/05/27 02:35:28 lindak Exp $
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * Vnode operations
 *
 * This file is similar to nfs3_vnops.c
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/filio.h>
#include <sys/uio.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vfs_opreg.h>
#include <sys/policy.h>
#include <sys/sdt.h>
#include <sys/taskq_impl.h>
#include <sys/zone.h>

#ifdef	_KERNEL
#include <sys/vmsystm.h>	// for desfree
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>
#include <vm/seg_vn.h>
#endif	// _KERNEL

#include <netsmb/smb_osdep.h>
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>

#include <sys/fs/smbfs_ioctl.h>
#include <fs/fs_subr.h>

#ifndef	MAXOFF32_T
#define	MAXOFF32_T	0x7fffffff
#endif

/*
 * We assign directory offsets like the NFS client, where the
 * offset increments by _one_ after each directory entry.
 * Further, the entries "." and ".." are always at offsets
 * zero and one (respectively) and the "real" entries from
 * the server appear at offsets starting with two.  This
 * macro is used to initialize the n_dirofs field after
 * setting n_dirseq with a _findopen call.
 */
#define	FIRST_DIROFS	2
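/*
 * For example, in a directory holding just "a" and "b", readdir offsets
 * are "." = 0, ".." = 1, "a" = 2, "b" = 3, so n_dirofs is reset to
 * FIRST_DIROFS (2) each time a new n_dirseq scan is set up.
 */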

/*
 * These characters are illegal in NTFS file names.
 * ref: http://support.microsoft.com/kb/147438
 *
 * Careful!  The check in the XATTR case skips the
 * first character to allow colon in XATTR names.
 */
static const char illegal_chars[] = {
	':',	/* colon - keep this first! */
	'\\',	/* back slash */
	'/',	/* slash */
	'*',	/* asterisk */
	'?',	/* question mark */
	'"',	/* double quote */
	'<',	/* less than sign */
	'>',	/* greater than sign */
	'|',	/* vertical bar */
	0
};

/*
 * Turning this on causes nodes to be created in the cache
 * during directory listings, normally avoiding a second
 * OtW attribute fetch just after a readdir.
 */
int smbfs_fastlookup = 1;

struct vnodeops *smbfs_vnodeops = NULL;

/* local static function defines */

static int	smbfslookup_cache(vnode_t *, char *, int, vnode_t **,
			cred_t *);
static int	smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
			int cache_ok, caller_context_t *);
static int	smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
			int flags);
static int	smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp,
			char *nnm, struct smb_cred *scred, int flags);
static int	smbfssetattr(vnode_t *, struct vattr *, int, cred_t *);
static int	smbfs_accessx(void *, int, cred_t *);
static int	smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
			caller_context_t *);
static void	smbfs_rele_fid(smbnode_t *, struct smb_cred *);
static uint32_t	xvattr_to_dosattr(smbnode_t *, struct vattr *);

static int	smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);

static int	smbfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
			caller_context_t *);
#ifdef	_KERNEL
static int	smbfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
			page_t *[], size_t, struct seg *, caddr_t,
			enum seg_rw, cred_t *);
static int	smbfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *,
			int, cred_t *);
static void	smbfs_delmap_async(void *);

static int	smbfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
			cred_t *);
static int	smbfs_bio(struct buf *, int, cred_t *);
static int	smbfs_writenp(smbnode_t *np, caddr_t base, int tcount,
			struct uio *uiop, int pgcreated);
#endif	// _KERNEL

/*
 * Error flags used to pass information about certain special errors
 * which need to be handled specially.
 */
#define	SMBFS_EOF			-98

/* When implementing OtW locks, make this a real function. */
#define	smbfs_lm_has_sleep(vp)	0

/*
 * These are the vnode ops routines which implement the vnode interface to
 * the networked file system.  These routines just take their parameters,
 * make them look networkish by putting the right info into interface structs,
 * and then calling the appropriate remote routine(s) to do the work.
 *
 * Note on directory name lookup caching:  If we detect a stale fhandle,
 * we purge the directory cache relative to that vnode.  This way, the
 * user won't get burned by the cache repeatedly.  See <smbfs/smbnode.h> for
 * more details on smbnode locking.
 */


/*
 * XXX
 * When new and relevant functionality is enabled, we should be
 * calling vfs_set_feature() to inform callers that pieces of
 * functionality are available, per PSARC 2007/227.
 */
/* ARGSUSED */
static int
smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	smbnode_t	*np;
	vnode_t		*vp;
	smbfattr_t	fa;
	u_int32_t	rights, rightsrcvd;
	u_int16_t	fid, oldfid;
	int		oldgenid;
	struct smb_cred scred;
	smbmntinfo_t	*smi;
	smb_share_t	*ssp;
	cred_t		*oldcr;
	int		tmperror;
	int		error = 0;

	vp = *vpp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);
	ssp = smi->smi_share;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_type != VREG && vp->v_type != VDIR) { /* XXX VLNK? */
		SMBVDEBUG("open eacces vtype=%d\n", vp->v_type);
		return (EACCES);
	}

	/*
	 * Get exclusive access to n_fid and related stuff.
	 * No returns after this until out.
	 */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/*
	 * Keep track of the vnode type at first open.
	 * It may change later, and we need close to do
	 * cleanup for the type we opened.  Also deny
	 * open of new types until old type is closed.
	 */
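	/*
	 * For example, if a node was first opened as a regular file and
	 * the remote object is later replaced by a directory, a new open
	 * sees n_ovtype != v_type and fails with EACCES until the
	 * original open has been fully closed.
	 */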
	if (np->n_ovtype == VNON) {
		ASSERT(np->n_dirrefs == 0);
		ASSERT(np->n_fidrefs == 0);
	} else if (np->n_ovtype != vp->v_type) {
		SMBVDEBUG("open n_ovtype=%d v_type=%d\n",
		    np->n_ovtype, vp->v_type);
		error = EACCES;
		goto out;
	}

	/*
	 * Directory open.  See smbfs_readvdir()
	 */
	if (vp->v_type == VDIR) {
		if (np->n_dirseq == NULL) {
			/* first open */
			error = smbfs_smb_findopen(np, "*", 1,
			    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
			    &scred, &np->n_dirseq);
			if (error != 0)
				goto out;
		}
		np->n_dirofs = FIRST_DIROFS;
		np->n_dirrefs++;
		goto have_fid;
	}

	/*
	 * If caller specified O_TRUNC/FTRUNC, then be sure to set
	 * FWRITE (to drive successful setattr(size=0) after open)
	 */
	if (flag & FTRUNC)
		flag |= FWRITE;

	/*
	 * If we already have it open, and the FID is still valid,
	 * check whether the rights are sufficient for FID reuse.
	 */
	if (np->n_fidrefs > 0 &&
	    np->n_vcgenid == ssp->ss_vcgenid) {
		int upgrade = 0;

		if ((flag & FWRITE) &&
		    !(np->n_rights & SA_RIGHT_FILE_WRITE_DATA))
			upgrade = 1;
		if ((flag & FREAD) &&
		    !(np->n_rights & SA_RIGHT_FILE_READ_DATA))
			upgrade = 1;
		if (!upgrade) {
			/*
			 * the existing open is good enough
			 */
			np->n_fidrefs++;
			goto have_fid;
		}
	}
	rights = np->n_fidrefs ? np->n_rights : 0;

	/*
	 * we always ask for READ_CONTROL so we can always get the
	 * owner/group IDs to satisfy a stat.  Ditto attributes.
	 */
	rights |= (STD_RIGHT_READ_CONTROL_ACCESS |
	    SA_RIGHT_FILE_READ_ATTRIBUTES);
	if ((flag & FREAD))
		rights |= SA_RIGHT_FILE_READ_DATA;
	if ((flag & FWRITE))
		rights |= SA_RIGHT_FILE_WRITE_DATA |
		    SA_RIGHT_FILE_APPEND_DATA |
		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;

	bzero(&fa, sizeof (fa));
	error = smbfs_smb_open(np,
	    NULL, 0, 0, /* name nmlen xattr */
	    rights, &scred,
	    &fid, &rightsrcvd, &fa);
	if (error)
		goto out;
	smbfs_attrcache_fa(vp, &fa);

	/*
	 * We have a new FID and access rights.
	 */
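	/*
	 * The new FID is installed first; the old FID from a prior,
	 * less-privileged open is closed afterward, and only when
	 * oldgenid still matches the current VC generation (after a
	 * reconnect the old handle is already gone on the server).
	 */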
	oldfid = np->n_fid;
	oldgenid = np->n_vcgenid;
	np->n_fid = fid;
	np->n_vcgenid = ssp->ss_vcgenid;
	np->n_rights = rightsrcvd;
	np->n_fidrefs++;
	if (np->n_fidrefs > 1 &&
	    oldgenid == ssp->ss_vcgenid) {
		/*
		 * We already had it open (presumably because
		 * it was open with insufficient rights.)
		 * Close old wire-open.
		 */
		tmperror = smbfs_smb_close(ssp,
		    oldfid, NULL, &scred);
		if (tmperror)
			SMBVDEBUG("error %d closing %s\n",
			    tmperror, np->n_rpath);
	}

	/*
	 * This thread did the open.
	 * Save our credentials too.
	 */
	mutex_enter(&np->r_statelock);
	oldcr = np->r_cred;
	np->r_cred = cr;
	crhold(cr);
	if (oldcr)
		crfree(oldcr);
	mutex_exit(&np->r_statelock);

have_fid:
	/*
	 * Keep track of the vnode type at first open.
	 * (see comments above)
	 */
	if (np->n_ovtype == VNON)
		np->n_ovtype = vp->v_type;

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);
	return (error);
}

/*ARGSUSED*/
static int
smbfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
	caller_context_t *ct)
{
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	struct smb_cred scred;
	int		error = 0;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/*
	 * Don't "bail out" for VFS_UNMOUNTED here,
	 * as we want to do cleanup, etc.
	 */

	/*
	 * zone_enter(2) prevents processes from changing zones with SMBFS files
	 * open; if we happen to get here from the wrong zone we can't do
	 * anything over the wire.
	 */
	if (smi->smi_zone_ref.zref_zone != curproc->p_zone) {
		/*
		 * We could attempt to clean up locks, except we're sure
		 * that the current process didn't acquire any locks on
		 * the file: any attempt to lock a file belonging to another
		 * zone will fail, and one can't lock an SMBFS file and then
		 * change zones, as that fails too.
		 *
		 * Returning an error here is the sane thing to do.  A
		 * subsequent call to VN_RELE() which translates to a
		 * smbfs_inactive() will clean up state: if the zone of the
		 * vnode's origin is still alive and kicking, an async worker
		 * thread will handle the request (from the correct zone), and
		 * everything (minus the final smbfs_getattr_otw() call) should
		 * be OK.  If the zone is going away smbfs_async_inactive() will
		 * throw away cached pages inline.
		 */
		return (EIO);
	}

	/*
	 * If we are using local locking for this filesystem, then
	 * release all of the SYSV style record locks.  Otherwise,
	 * we are doing network locking and we need to release all
	 * of the network locks.  All of the locks held by this
	 * process on this file are released no matter what the
	 * incoming reference count is.
	 */
	if (smi->smi_flags & SMI_LLOCK) {
		pid_t pid = ddi_get_pid();
		cleanlocks(vp, pid, 0);
		cleanshares(vp, pid);
	}
	/*
	 * else doing OtW locking.  SMB servers drop all locks
	 * on the file ID we close here, so no _lockrelease()
	 */

	/*
	 * This (passed in) count is the ref. count from the
	 * user's file_t before the closef call (fio.c).
	 * The rest happens only on last close.
	 */
	if (count > 1)
		return (0);

	/* NFS has DNLC purge here. */

	/*
	 * If the file was open for write and there are pages,
	 * then make sure dirty pages are written back.
	 *
	 * NFS does this async when "close-to-open" is off
	 * (MI_NOCTO flag is set) to avoid blocking the caller.
	 * For now, always do this synchronously (no B_ASYNC).
	 */
	if ((flag & FWRITE) && vn_has_cached_data(vp)) {
		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
		if (error == EAGAIN)
			error = 0;
	}
	if (error == 0) {
		mutex_enter(&np->r_statelock);
		np->r_flags &= ~RSTALE;
		np->r_error = 0;
		mutex_exit(&np->r_statelock);
	}

	/*
	 * Decrement the reference count for the FID
	 * and possibly do the OtW close.
	 *
	 * Exclusive lock for modifying n_fid stuff.
	 * Don't want this one ever interruptible.
	 */
	(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
	smb_credinit(&scred, cr);

	smbfs_rele_fid(np, &scred);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	return (0);
}

/*
 * Helper for smbfs_close.  Decrement the reference count
 * for an SMB-level file or directory ID, and when the last
 * reference for the fid goes away, do the OtW close.
 * Also called in smbfs_inactive (defensive cleanup).
 */
static void
smbfs_rele_fid(smbnode_t *np, struct smb_cred *scred)
{
	smb_share_t	*ssp;
	cred_t		*oldcr;
	struct smbfs_fctx *fctx;
	int		error;
	uint16_t	ofid;

	ssp = np->n_mount->smi_share;
	error = 0;

	/* Make sure we serialize for n_dirseq use. */
	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));

	/*
	 * Note that vp->v_type may change if a remote node
	 * is deleted and recreated as a different type, and
	 * our getattr may change v_type accordingly.
	 * Now use n_ovtype to keep track of the v_type
	 * we had during open (see comments above).
	 */
	switch (np->n_ovtype) {
	case VDIR:
		ASSERT(np->n_dirrefs > 0);
		if (--np->n_dirrefs)
			return;
		if ((fctx = np->n_dirseq) != NULL) {
			np->n_dirseq = NULL;
			np->n_dirofs = 0;
			error = smbfs_smb_findclose(fctx, scred);
		}
		break;

	case VREG:
		ASSERT(np->n_fidrefs > 0);
		if (--np->n_fidrefs)
			return;
		if ((ofid = np->n_fid) != SMB_FID_UNUSED) {
			np->n_fid = SMB_FID_UNUSED;
			/* After reconnect, n_fid is invalid */
			if (np->n_vcgenid == ssp->ss_vcgenid) {
				error = smbfs_smb_close(
				    ssp, ofid, NULL, scred);
			}
		}
		break;

	default:
		SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
		break;
	}
	if (error) {
		SMBVDEBUG("error %d closing %s\n",
		    error, np->n_rpath);
	}

	/* Allow next open to use any v_type. */
	np->n_ovtype = VNON;

	/*
	 * Other "last close" stuff.
	 */
	mutex_enter(&np->r_statelock);
	if (np->n_flag & NATTRCHANGED)
		smbfs_attrcache_rm_locked(np);
	oldcr = np->r_cred;
	np->r_cred = NULL;
	mutex_exit(&np->r_statelock);
	if (oldcr != NULL)
		crfree(oldcr);
}

/* ARGSUSED */
static int
smbfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr	va;
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	smb_share_t	*ssp;
	offset_t	endoff;
	ssize_t		past_eof;
	int		error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);
	ssp = smi->smi_share;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * Like NFS3, just check for 63-bit overflow.
	 * Our SMB layer takes care to return EFBIG
	 * when it has to fallback to a 32-bit call.
	 */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	/* get vnode attributes from server */
	va.va_mask = AT_SIZE | AT_MTIME;
	if (error = smbfsgetattr(vp, &va, cr))
		return (error);

	/* Update mtime with mtime from server here? */

	/* if offset is beyond EOF, read nothing */
	if (uiop->uio_loffset >= va.va_size)
		return (0);

	/*
	 * Limit the read to the remaining file size.
	 * Do this by temporarily reducing uio_resid
	 * by the amount that lies beyond the EOF.
	 */
	if (endoff > va.va_size) {
		past_eof = (ssize_t)(endoff - va.va_size);
		uiop->uio_resid -= past_eof;
	} else
		past_eof = 0;

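	/*
	 * For example, a 512-byte read at offset 0 from a 100-byte file:
	 * past_eof = 412, uio_resid is temporarily reduced to 100 for the
	 * transfer, and the 412 is added back before returning so the
	 * caller's residual count stays correct.
	 */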
	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

		/* Shared lock for n_fid use in smb_rwuio */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
			return (EINTR);
		smb_credinit(&scred, cr);

		/* After reconnect, n_fid is invalid */
		if (np->n_vcgenid != ssp->ss_vcgenid)
			error = ESTALE;
		else
			error = smb_rwuio(ssp, np->n_fid, UIO_READ,
			    uiop, &scred, smb_timo_read);

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		/* undo adjustment of resid */
		uiop->uio_resid += past_eof;

		return (error);
	}

#ifdef	_KERNEL
	/* (else) Do I/O through segmap. */
	do {
		caddr_t		base;
		u_offset_t	off;
		size_t		n;
		int		on;
		uint_t		flags;

		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		error = smbfs_validate_caches(vp, cr);
		if (error)
			break;

		/* NFS waits for RINCACHEPURGE here. */

		if (vpm_enable) {
			/*
			 * Copy data.
			 */
			error = vpm_data_copy(vp, off + on, n, uiop,
			    1, NULL, 0, S_READ);
		} else {
			base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
			    S_READ);

			error = uiomove(base + on, n, UIO_READ, uiop);
		}

		if (!error) {
			/*
			 * If read a whole block or read to eof,
			 * won't need this buffer again soon.
			 */
			mutex_enter(&np->r_statelock);
			if (n + on == MAXBSIZE ||
			    uiop->uio_loffset == np->r_size)
				flags = SM_DONTNEED;
			else
				flags = 0;
			mutex_exit(&np->r_statelock);
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
		}
	} while (!error && uiop->uio_resid > 0);
#else	// _KERNEL
	error = ENOSYS;
#endif	// _KERNEL

	/* undo adjustment of resid */
	uiop->uio_resid += past_eof;

	return (error);
}


/* ARGSUSED */
static int
smbfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr	va;
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	smb_share_t	*ssp;
	offset_t	endoff, limit;
	ssize_t		past_limit;
	int		error, timo;
	u_offset_t	last_off;
	size_t		last_resid;
#ifdef	_KERNEL
	uint_t		bsize;
#endif

	np = VTOSMB(vp);
	smi = VTOSMI(vp);
	ssp = smi->smi_share;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * Handle ioflag bits: (FAPPEND|FSYNC|FDSYNC)
	 */
	if (ioflag & (FAPPEND | FSYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attrcache_remove(np);
		}
	}
	if (ioflag & FAPPEND) {
		/*
		 * File size can be changed by another client
		 *
		 * Todo: Consider redesigning this to use a
		 * handle opened for append instead.
		 */
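		/*
		 * The attribute cache was purged above when NMODIFIED,
		 * so in that case this getattr goes over the wire and the
		 * append offset reflects the server's current file size.
		 * A window remains where another client can change the
		 * size between this getattr and the write below.
		 */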
		va.va_mask = AT_SIZE;
		if (error = smbfsgetattr(vp, &va, cr))
			return (error);
		uiop->uio_loffset = va.va_size;
	}

	/*
	 * Like NFS3, just check for 63-bit overflow.
	 */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	/*
	 * Check to make sure that the process will not exceed
	 * its limit on file size.  It is okay to write up to
	 * the limit, but not beyond.  Thus, the write which
	 * reaches the limit will be short and the next write
	 * will return an error.
	 *
	 * So if we're starting at or beyond the limit, EFBIG.
	 * Otherwise, temporarily reduce resid to the amount
	 * that is after the limit.
	 */
	limit = uiop->uio_llimit;
	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;
	if (uiop->uio_loffset >= limit) {
#ifdef	_KERNEL
		proc_t *p = ttoproc(curthread);

		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
		    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
#endif	// _KERNEL
		return (EFBIG);
	}
	if (endoff > limit) {
		past_limit = (ssize_t)(endoff - limit);
		uiop->uio_resid -= past_limit;
	} else
		past_limit = 0;

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

#ifdef	_KERNEL
smbfs_fwrite:
#endif	// _KERNEL
		if (np->r_flags & RSTALE) {
			last_resid = uiop->uio_resid;
			last_off = uiop->uio_loffset;
			error = np->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			goto bottom;
		}

		/* Timeout: longer for append. */
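		/*
		 * smb_timo_append is used whenever the write extends the
		 * file (endoff > r_size), since extending the file can
		 * presumably take the server longer than an in-place
		 * overwrite.
		 */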
		timo = smb_timo_write;
		if (endoff > np->r_size)
			timo = smb_timo_append;

		/* Shared lock for n_fid use in smb_rwuio */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
			return (EINTR);
		smb_credinit(&scred, cr);

		/* After reconnect, n_fid is invalid */
		if (np->n_vcgenid != ssp->ss_vcgenid)
			error = ESTALE;
		else
			error = smb_rwuio(ssp, np->n_fid, UIO_WRITE,
			    uiop, &scred, timo);

		if (error == 0) {
			mutex_enter(&np->r_statelock);
			np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
			if (uiop->uio_loffset > (offset_t)np->r_size)
				np->r_size = (len_t)uiop->uio_loffset;
			mutex_exit(&np->r_statelock);
			if (ioflag & (FSYNC | FDSYNC)) {
				/* Don't error the I/O if this fails. */
				(void) smbfs_smb_flush(np, &scred);
			}
		}

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		/* undo adjustment of resid */
		uiop->uio_resid += past_limit;

		return (error);
	}

#ifdef	_KERNEL
	/* (else) Do I/O through segmap. */
	bsize = vp->v_vfsp->vfs_bsize;

	do {
		caddr_t		base;
		u_offset_t	off;
		size_t		n;
		int		on;
		uint_t		flags;

		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		last_resid = uiop->uio_resid;
		last_off = uiop->uio_loffset;

		if (np->r_flags & RSTALE) {
			error = np->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			break;
		}

		/*
		 * From NFS: Don't create dirty pages faster than they
		 * can be cleaned.
		 *
		 * Here NFS also checks for async writes (np->r_awcount)
		 */
		mutex_enter(&np->r_statelock);
		while (np->r_gcount > 0) {
			if (SMBINTR(vp)) {
				klwp_t *lwp = ttolwp(curthread);

				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (!cv_wait_sig(&np->r_cv, &np->r_statelock)) {
					mutex_exit(&np->r_statelock);
					if (lwp != NULL)
						lwp->lwp_nostop--;
					error = EINTR;
					goto bottom;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&np->r_cv, &np->r_statelock);
		}
		mutex_exit(&np->r_statelock);

		/*
		 * Touch the page and fault it in if it is not in core
		 * before segmap_getmapflt or vpm_data_copy can lock it.
		 * This is to avoid the deadlock if the buffer is mapped
		 * to the same file through mmap which we want to write.
		 */
		uio_prefaultpages((long)n, uiop);

		if (vpm_enable) {
			/*
			 * It will use kpm mappings, so no need to
			 * pass an address.
			 */
			error = smbfs_writenp(np, NULL, n, uiop, 0);
		} else {
			if (segmap_kpm) {
				int pon = uiop->uio_loffset & PAGEOFFSET;
				size_t pn = MIN(PAGESIZE - pon,
				    uiop->uio_resid);
				int pagecreate;

				mutex_enter(&np->r_statelock);
				pagecreate = (pon == 0) && (pn == PAGESIZE ||
				    uiop->uio_loffset + pn >= np->r_size);
				mutex_exit(&np->r_statelock);

				base = segmap_getmapflt(segkmap, vp, off + on,
				    pn, !pagecreate, S_WRITE);

				error = smbfs_writenp(np, base + pon, n, uiop,
				    pagecreate);

			} else {
				base = segmap_getmapflt(segkmap, vp, off + on,
				    n, 0, S_READ);
				error = smbfs_writenp(np, base + on, n, uiop, 0);
			}
		}

		if (!error) {
			if (smi->smi_flags & SMI_NOAC)
				flags = SM_WRITE;
			else if ((uiop->uio_loffset % bsize) == 0 ||
			    IS_SWAPVP(vp)) {
				/*
				 * Have written a whole block.
				 * Start an asynchronous write
				 * and mark the buffer to
				 * indicate that it won't be
				 * needed again soon.
				 */
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			} else
				flags = 0;
			if ((ioflag & (FSYNC|FDSYNC)) ||
			    (np->r_flags & ROUTOFSPACE)) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
			/*
			 * In the event that we got an access error while
			 * faulting in a page for a write-only file just
			 * force a write.
			 */
			if (error == EACCES)
				goto smbfs_fwrite;
		}
	} while (!error && uiop->uio_resid > 0);
#else	// _KERNEL
	last_resid = uiop->uio_resid;
	last_off = uiop->uio_loffset;
	error = ENOSYS;
#endif	// _KERNEL

bottom:
	/* undo adjustment of resid */
	if (error) {
		uiop->uio_resid = last_resid + past_limit;
		uiop->uio_loffset = last_off;
	} else {
		uiop->uio_resid += past_limit;
	}

	return (error);
}

#ifdef	_KERNEL

/*
 * Like nfs_client.c: writerp()
 *
 * Write by creating pages and uiomove data onto them.
 */

int
smbfs_writenp(smbnode_t *np, caddr_t base, int tcount, struct uio *uio,
	int pgcreated)
{
	int		pagecreate;
	int		n;
	int		saved_n;
	caddr_t		saved_base;
	u_offset_t	offset;
	int		error;
	int		sm_error;
	vnode_t		*vp = SMBTOV(np);

	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));
	if (!vpm_enable) {
		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
	}

	/*
	 * Move bytes in at most PAGESIZE chunks. We must avoid
	 * spanning pages in uiomove() because page faults may cause
	 * the cache to be invalidated out from under us. The r_size is not
	 * updated until after the uiomove.  If we push the last page of a
	 * file before r_size is correct, we will lose the data written past
	 * the current (and invalid) r_size.
	 */
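	/*
	 * For example (assuming 4 KB pages), a 6000-byte write starting at
	 * offset 3000 is moved as 1096 bytes (up to the page boundary at
	 * 4096), then 4096 bytes, then the remaining 808 bytes.
	 */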
	do {
		offset = uio->uio_loffset;
		pagecreate = 0;

		/*
		 * n is the number of bytes required to satisfy the request
		 *   or the number of bytes to fill out the page.
		 */
		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);

		/*
		 * Check to see if we can skip reading in the page
		 * and just allocate the memory.  We can do this
		 * if we are going to rewrite the entire mapping
		 * or if we are going to write to or beyond the current
		 * end of file from the beginning of the mapping.
		 *
		 * The read of r_size is now protected by r_statelock.
		 */
		mutex_enter(&np->r_statelock);
		/*
		 * When pgcreated is nonzero the caller has already done
		 * a segmap_getmapflt with forcefault 0 and S_WRITE. With
		 * segkpm this means we already have at least one page
		 * created and mapped at base.
		 */
		pagecreate = pgcreated ||
		    ((offset & PAGEOFFSET) == 0 &&
		    (n == PAGESIZE || ((offset + n) >= np->r_size)));

		mutex_exit(&np->r_statelock);
		if (!vpm_enable && pagecreate) {
			/*
			 * The last argument tells segmap_pagecreate() to
			 * always lock the page, as opposed to sometimes
			 * returning with the page locked. This way we avoid a
			 * fault on the ensuing uiomove(), but also
			 * more importantly (to fix bug 1094402) we can
			 * call segmap_fault() to unlock the page in all
			 * cases. An alternative would be to modify
			 * segmap_pagecreate() to tell us when it is
			 * locking a page, but that's a fairly major
			 * interface change.
			 */
			if (pgcreated == 0)
				(void) segmap_pagecreate(segkmap, base,
				    (uint_t)n, 1);
			saved_base = base;
			saved_n = n;
		}

		/*
		 * The number of bytes of data in the last page cannot be
		 * accurately determined while the page is being uiomove'd
		 * to and the size of the file is being updated.  Thus,
		 * inform threads which need to know accurately how much
		 * data is in the last page of the file.  They will not
		 * do the i/o immediately, but will arrange for the i/o
		 * to happen later when this modify operation will have
		 * finished.
		 */
		ASSERT(!(np->r_flags & RMODINPROGRESS));
		mutex_enter(&np->r_statelock);
		np->r_flags |= RMODINPROGRESS;
		np->r_modaddr = (offset & MAXBMASK);
		mutex_exit(&np->r_statelock);

		if (vpm_enable) {
			/*
			 * Copy data. If new pages are created, part of
			 * the page that is not written will be initialized
			 * with zeros.
			 */
			error = vpm_data_copy(vp, offset, n, uio,
			    !pagecreate, NULL, 0, S_WRITE);
		} else {
			error = uiomove(base, n, UIO_WRITE, uio);
		}

		/*
		 * r_size is the maximum number of
		 * bytes known to be in the file.
		 * Make sure it is at least as high as the
		 * first unwritten byte pointed to by uio_loffset.
		 */
		mutex_enter(&np->r_statelock);
		if (np->r_size < uio->uio_loffset)
			np->r_size = uio->uio_loffset;
		np->r_flags &= ~RMODINPROGRESS;
		np->r_flags |= RDIRTY;
		mutex_exit(&np->r_statelock);

		/* n = # of bytes written */
		n = (int)(uio->uio_loffset - offset);

		if (!vpm_enable) {
			base += n;
		}
		tcount -= n;
		/*
		 * If we created pages w/o initializing them completely,
		 * we need to zero the part that wasn't set up.
		 * This happens in most EOF write cases and if
		 * we had some sort of error during the uiomove.
		 */
		if (!vpm_enable && pagecreate) {
			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
				(void) kzero(base, PAGESIZE - n);

			if (pgcreated) {
				/*
				 * Caller is responsible for this page,
				 * it was not created in this loop.
				 */
				pgcreated = 0;
			} else {
				/*
				 * For bug 1094402: segmap_pagecreate locks
				 * page. Unlock it. This also unlocks the
				 * pages allocated by page_create_va() in
				 * segmap_pagecreate().
				 */
				sm_error = segmap_fault(kas.a_hat, segkmap,
				    saved_base, saved_n,
				    F_SOFTUNLOCK, S_WRITE);
				if (error == 0)
					error = sm_error;
			}
		}
	} while (tcount > 0 && error == 0);

	return (error);
}

/*
 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
 * Like nfs3_rdwrlbn()
 */
static int
smbfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
	int flags, cred_t *cr)
{
	smbmntinfo_t	*smi = VTOSMI(vp);
	struct buf *bp;
	int error;
	int sync;

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	bp = pageio_setup(pp, len, vp, flags);
	ASSERT(bp != NULL);

	/*
	 * pageio_setup should have set b_addr to 0.  This
	 * is correct since we want to do I/O on a page
	 * boundary.  bp_mapin will use this addr to calculate
	 * an offset, and then set b_addr to the kernel virtual
	 * address it allocated for us.
	 */
	ASSERT(bp->b_un.b_addr == 0);

	bp->b_edev = 0;
	bp->b_dev = 0;
	bp->b_lblkno = lbtodb(off);
	bp->b_file = vp;
	bp->b_offset = (offset_t)off;
	bp_mapin(bp);

	/*
	 * Calculate the desired level of stability to write data.
	 */
	if ((flags & (B_WRITE|B_ASYNC)) == (B_WRITE|B_ASYNC) &&
	    freemem > desfree) {
		sync = 0;
	} else {
		sync = 1;
	}

	error = smbfs_bio(bp, sync, cr);

	bp_mapout(bp);
	pageio_done(bp);

	return (error);
}


/*
 * Corresponds to nfs3_vnops.c : nfs3_bio(), though the NFS code
 * uses nfs3read()/nfs3write() where we use smb_rwuio().  Also,
 * NFS has this later in the file.  Move it up here closer to
 * the one call site just above.
 */
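/*
 * On reads, smbfs_bio() returns SMBFS_EOF when the request starts at or
 * beyond r_size and zero-fills the tail of the buffer when the request
 * extends past EOF; writes that start at or beyond r_size return EINVAL.
 */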
12675f4fc069Sjilinxpd */ 12685f4fc069Sjilinxpd 12695f4fc069Sjilinxpd static int 12705f4fc069Sjilinxpd smbfs_bio(struct buf *bp, int sync, cred_t *cr) 12715f4fc069Sjilinxpd { 12725f4fc069Sjilinxpd struct iovec aiov[1]; 12735f4fc069Sjilinxpd struct uio auio; 12745f4fc069Sjilinxpd struct smb_cred scred; 12755f4fc069Sjilinxpd smbnode_t *np = VTOSMB(bp->b_vp); 12765f4fc069Sjilinxpd smbmntinfo_t *smi = np->n_mount; 12775f4fc069Sjilinxpd smb_share_t *ssp = smi->smi_share; 12785f4fc069Sjilinxpd offset_t offset; 12795f4fc069Sjilinxpd offset_t endoff; 12805f4fc069Sjilinxpd size_t count; 12815f4fc069Sjilinxpd size_t past_eof; 12825f4fc069Sjilinxpd int error; 12835f4fc069Sjilinxpd 12845f4fc069Sjilinxpd ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone); 12855f4fc069Sjilinxpd 12865f4fc069Sjilinxpd offset = ldbtob(bp->b_lblkno); 12875f4fc069Sjilinxpd count = bp->b_bcount; 12885f4fc069Sjilinxpd endoff = offset + count; 12895f4fc069Sjilinxpd if (offset < 0 || endoff < 0) 12905f4fc069Sjilinxpd return (EINVAL); 12915f4fc069Sjilinxpd 12925f4fc069Sjilinxpd /* 12935f4fc069Sjilinxpd * Limit file I/O to the remaining file size, but see 12945f4fc069Sjilinxpd * the notes in smbfs_getpage about SMBFS_EOF. 12955f4fc069Sjilinxpd */ 12965f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 12975f4fc069Sjilinxpd if (offset >= np->r_size) { 12985f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 12995f4fc069Sjilinxpd if (bp->b_flags & B_READ) { 13005f4fc069Sjilinxpd return (SMBFS_EOF); 13015f4fc069Sjilinxpd } else { 13025f4fc069Sjilinxpd return (EINVAL); 13035f4fc069Sjilinxpd } 13045f4fc069Sjilinxpd } 13055f4fc069Sjilinxpd if (endoff > np->r_size) { 13065f4fc069Sjilinxpd past_eof = (size_t)(endoff - np->r_size); 13075f4fc069Sjilinxpd count -= past_eof; 13085f4fc069Sjilinxpd } else 13095f4fc069Sjilinxpd past_eof = 0; 13105f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 13115f4fc069Sjilinxpd ASSERT(count > 0); 13125f4fc069Sjilinxpd 13135f4fc069Sjilinxpd /* Caller did bpmapin(). Mapped address is... */ 13145f4fc069Sjilinxpd aiov[0].iov_base = bp->b_un.b_addr; 13155f4fc069Sjilinxpd aiov[0].iov_len = count; 13165f4fc069Sjilinxpd auio.uio_iov = aiov; 13175f4fc069Sjilinxpd auio.uio_iovcnt = 1; 13185f4fc069Sjilinxpd auio.uio_loffset = offset; 13195f4fc069Sjilinxpd auio.uio_segflg = UIO_SYSSPACE; 13205f4fc069Sjilinxpd auio.uio_fmode = 0; 13215f4fc069Sjilinxpd auio.uio_resid = count; 13225f4fc069Sjilinxpd 13235f4fc069Sjilinxpd /* Shared lock for n_fid use in smb_rwuio */ 13245f4fc069Sjilinxpd if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, 13255f4fc069Sjilinxpd smi->smi_flags & SMI_INT)) 13265f4fc069Sjilinxpd return (EINTR); 13275f4fc069Sjilinxpd smb_credinit(&scred, cr); 13285f4fc069Sjilinxpd 13295f4fc069Sjilinxpd DTRACE_IO1(start, struct buf *, bp); 13305f4fc069Sjilinxpd 13315f4fc069Sjilinxpd if (bp->b_flags & B_READ) { 13325f4fc069Sjilinxpd 13335f4fc069Sjilinxpd /* After reconnect, n_fid is invalid */ 13345f4fc069Sjilinxpd if (np->n_vcgenid != ssp->ss_vcgenid) 13355f4fc069Sjilinxpd error = ESTALE; 13365f4fc069Sjilinxpd else 13375f4fc069Sjilinxpd error = smb_rwuio(ssp, np->n_fid, UIO_READ, 13385f4fc069Sjilinxpd &auio, &scred, smb_timo_read); 13395f4fc069Sjilinxpd 13405f4fc069Sjilinxpd /* Like NFS, only set b_error here. 
*/ 13415f4fc069Sjilinxpd bp->b_error = error; 13425f4fc069Sjilinxpd bp->b_resid = auio.uio_resid; 13435f4fc069Sjilinxpd 13445f4fc069Sjilinxpd if (!error && auio.uio_resid != 0) 13455f4fc069Sjilinxpd error = EIO; 13465f4fc069Sjilinxpd if (!error && past_eof != 0) { 13475f4fc069Sjilinxpd /* Zero the memory beyond EOF. */ 13485f4fc069Sjilinxpd bzero(bp->b_un.b_addr + count, past_eof); 13495f4fc069Sjilinxpd } 13505f4fc069Sjilinxpd } else { 13515f4fc069Sjilinxpd 13525f4fc069Sjilinxpd /* After reconnect, n_fid is invalid */ 13535f4fc069Sjilinxpd if (np->n_vcgenid != ssp->ss_vcgenid) 13545f4fc069Sjilinxpd error = ESTALE; 13555f4fc069Sjilinxpd else 13565f4fc069Sjilinxpd error = smb_rwuio(ssp, np->n_fid, UIO_WRITE, 13575f4fc069Sjilinxpd &auio, &scred, smb_timo_write); 13585f4fc069Sjilinxpd 13595f4fc069Sjilinxpd /* Like NFS, only set b_error here. */ 13605f4fc069Sjilinxpd bp->b_error = error; 13615f4fc069Sjilinxpd bp->b_resid = auio.uio_resid; 13625f4fc069Sjilinxpd 13635f4fc069Sjilinxpd if (!error && auio.uio_resid != 0) 13645f4fc069Sjilinxpd error = EIO; 13655f4fc069Sjilinxpd if (!error && sync) { 13665f4fc069Sjilinxpd (void) smbfs_smb_flush(np, &scred); 13675f4fc069Sjilinxpd } 13685f4fc069Sjilinxpd } 13695f4fc069Sjilinxpd 13705f4fc069Sjilinxpd /* 13715f4fc069Sjilinxpd * This comes from nfs3_commit() 13725f4fc069Sjilinxpd */ 13735f4fc069Sjilinxpd if (error != 0) { 13745f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 13755f4fc069Sjilinxpd if (error == ESTALE) 13765f4fc069Sjilinxpd np->r_flags |= RSTALE; 13775f4fc069Sjilinxpd if (!np->r_error) 13785f4fc069Sjilinxpd np->r_error = error; 13795f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 13805f4fc069Sjilinxpd bp->b_flags |= B_ERROR; 13815f4fc069Sjilinxpd } 13825f4fc069Sjilinxpd 13835f4fc069Sjilinxpd DTRACE_IO1(done, struct buf *, bp); 13845f4fc069Sjilinxpd 13855f4fc069Sjilinxpd smb_credrele(&scred); 13865f4fc069Sjilinxpd smbfs_rw_exit(&np->r_lkserlock); 13875f4fc069Sjilinxpd 13885f4fc069Sjilinxpd if (error == ESTALE) 13895f4fc069Sjilinxpd smbfs_attrcache_remove(np); 13905f4fc069Sjilinxpd 13915f4fc069Sjilinxpd return (error); 13925f4fc069Sjilinxpd } 1393*8329232eSGordon Ross #endif // _KERNEL 13945f4fc069Sjilinxpd 13955f4fc069Sjilinxpd /* 13965f4fc069Sjilinxpd * Here NFS has: nfs3write, nfs3read 13975f4fc069Sjilinxpd * We use smb_rwuio instead. 13985f4fc069Sjilinxpd */ 13995f4fc069Sjilinxpd 14005f4fc069Sjilinxpd /* ARGSUSED */ 14015f4fc069Sjilinxpd static int 14025f4fc069Sjilinxpd smbfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, 14035f4fc069Sjilinxpd cred_t *cr, int *rvalp, caller_context_t *ct) 14045f4fc069Sjilinxpd { 14055f4fc069Sjilinxpd int error; 14065f4fc069Sjilinxpd smbmntinfo_t *smi; 14075f4fc069Sjilinxpd 14085f4fc069Sjilinxpd smi = VTOSMI(vp); 14095f4fc069Sjilinxpd 14105f4fc069Sjilinxpd if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 14115f4fc069Sjilinxpd return (EIO); 14125f4fc069Sjilinxpd 14135f4fc069Sjilinxpd if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 14145f4fc069Sjilinxpd return (EIO); 14155f4fc069Sjilinxpd 14165f4fc069Sjilinxpd switch (cmd) { 14175f4fc069Sjilinxpd 14185f4fc069Sjilinxpd case _FIOFFS: 14195f4fc069Sjilinxpd error = smbfs_fsync(vp, 0, cr, ct); 14205f4fc069Sjilinxpd break; 14215f4fc069Sjilinxpd 14225f4fc069Sjilinxpd /* 14235f4fc069Sjilinxpd * The following two ioctls are used by bfu. 14245f4fc069Sjilinxpd * Silently ignore to avoid bfu errors. 
14255f4fc069Sjilinxpd */ 14265f4fc069Sjilinxpd case _FIOGDIO: 14275f4fc069Sjilinxpd case _FIOSDIO: 14285f4fc069Sjilinxpd error = 0; 14295f4fc069Sjilinxpd break; 14305f4fc069Sjilinxpd 14315f4fc069Sjilinxpd #if 0 /* Todo - SMB ioctl query regions */ 14325f4fc069Sjilinxpd case _FIO_SEEK_DATA: 14335f4fc069Sjilinxpd case _FIO_SEEK_HOLE: 14345f4fc069Sjilinxpd #endif 14355f4fc069Sjilinxpd 14365f4fc069Sjilinxpd case _FIODIRECTIO: 14375f4fc069Sjilinxpd error = smbfs_directio(vp, (int)arg, cr); 14385f4fc069Sjilinxpd break; 14395f4fc069Sjilinxpd 14405f4fc069Sjilinxpd /* 14415f4fc069Sjilinxpd * Allow get/set with "raw" security descriptor (SD) data. 14425f4fc069Sjilinxpd * Useful for testing, diagnosing idmap problems, etc. 14435f4fc069Sjilinxpd */ 14445f4fc069Sjilinxpd case SMBFSIO_GETSD: 14455f4fc069Sjilinxpd error = smbfs_acl_iocget(vp, arg, flag, cr); 14465f4fc069Sjilinxpd break; 14475f4fc069Sjilinxpd 14485f4fc069Sjilinxpd case SMBFSIO_SETSD: 14495f4fc069Sjilinxpd error = smbfs_acl_iocset(vp, arg, flag, cr); 14505f4fc069Sjilinxpd break; 14515f4fc069Sjilinxpd 14525f4fc069Sjilinxpd default: 14535f4fc069Sjilinxpd error = ENOTTY; 14545f4fc069Sjilinxpd break; 14555f4fc069Sjilinxpd } 14565f4fc069Sjilinxpd 14575f4fc069Sjilinxpd return (error); 14585f4fc069Sjilinxpd } 14595f4fc069Sjilinxpd 14605f4fc069Sjilinxpd 14615f4fc069Sjilinxpd /* 14625f4fc069Sjilinxpd * Return either cached or remote attributes. If get remote attr 14635f4fc069Sjilinxpd * use them to check and invalidate caches, then cache the new attributes. 14645f4fc069Sjilinxpd */ 14655f4fc069Sjilinxpd /* ARGSUSED */ 14665f4fc069Sjilinxpd static int 14675f4fc069Sjilinxpd smbfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr, 14685f4fc069Sjilinxpd caller_context_t *ct) 14695f4fc069Sjilinxpd { 14705f4fc069Sjilinxpd smbnode_t *np; 14715f4fc069Sjilinxpd smbmntinfo_t *smi; 14725f4fc069Sjilinxpd int error; 14735f4fc069Sjilinxpd 14745f4fc069Sjilinxpd smi = VTOSMI(vp); 14755f4fc069Sjilinxpd 14765f4fc069Sjilinxpd if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 14775f4fc069Sjilinxpd return (EIO); 14785f4fc069Sjilinxpd 14795f4fc069Sjilinxpd if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 14805f4fc069Sjilinxpd return (EIO); 14815f4fc069Sjilinxpd 14825f4fc069Sjilinxpd /* 14835f4fc069Sjilinxpd * If it has been specified that the return value will 14845f4fc069Sjilinxpd * just be used as a hint, and we are only being asked 14855f4fc069Sjilinxpd * for size, fsid or rdevid, then return the client's 14865f4fc069Sjilinxpd * notion of these values without checking to make sure 14875f4fc069Sjilinxpd * that the attribute cache is up to date. 14885f4fc069Sjilinxpd * The whole point is to avoid an over the wire GETATTR 14895f4fc069Sjilinxpd * call. 
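 * For example, a stat() issued only to learn the current size can
 * be answered from the cached r_size without revalidating the
 * attribute cache.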
14905f4fc069Sjilinxpd */
14915f4fc069Sjilinxpd np = VTOSMB(vp);
14925f4fc069Sjilinxpd if (flags & ATTR_HINT) {
14935f4fc069Sjilinxpd if (vap->va_mask ==
14945f4fc069Sjilinxpd (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
14955f4fc069Sjilinxpd mutex_enter(&np->r_statelock);
14965f4fc069Sjilinxpd if (vap->va_mask & AT_SIZE)
14975f4fc069Sjilinxpd vap->va_size = np->r_size;
14985f4fc069Sjilinxpd if (vap->va_mask & AT_FSID)
14995f4fc069Sjilinxpd vap->va_fsid = vp->v_vfsp->vfs_dev;
15005f4fc069Sjilinxpd if (vap->va_mask & AT_RDEV)
15015f4fc069Sjilinxpd vap->va_rdev = vp->v_rdev;
15025f4fc069Sjilinxpd mutex_exit(&np->r_statelock);
15035f4fc069Sjilinxpd return (0);
15045f4fc069Sjilinxpd }
15055f4fc069Sjilinxpd }
15065f4fc069Sjilinxpd 
15075f4fc069Sjilinxpd /*
15085f4fc069Sjilinxpd * Only need to flush pages if asking for the mtime
15095f4fc069Sjilinxpd * and if there are any dirty pages.
15105f4fc069Sjilinxpd *
15115f4fc069Sjilinxpd * Here NFS also checks for async writes (np->r_awcount)
15125f4fc069Sjilinxpd */
15135f4fc069Sjilinxpd if (vap->va_mask & AT_MTIME) {
15145f4fc069Sjilinxpd if (vn_has_cached_data(vp) &&
15155f4fc069Sjilinxpd ((np->r_flags & RDIRTY) != 0)) {
15165f4fc069Sjilinxpd mutex_enter(&np->r_statelock);
15175f4fc069Sjilinxpd np->r_gcount++;
15185f4fc069Sjilinxpd mutex_exit(&np->r_statelock);
15195f4fc069Sjilinxpd error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
15205f4fc069Sjilinxpd mutex_enter(&np->r_statelock);
15215f4fc069Sjilinxpd if (error && (error == ENOSPC || error == EDQUOT)) {
15225f4fc069Sjilinxpd if (!np->r_error)
15235f4fc069Sjilinxpd np->r_error = error;
15245f4fc069Sjilinxpd }
15255f4fc069Sjilinxpd if (--np->r_gcount == 0)
15265f4fc069Sjilinxpd cv_broadcast(&np->r_cv);
15275f4fc069Sjilinxpd mutex_exit(&np->r_statelock);
15285f4fc069Sjilinxpd }
15295f4fc069Sjilinxpd }
15305f4fc069Sjilinxpd 
15315f4fc069Sjilinxpd return (smbfsgetattr(vp, vap, cr));
15325f4fc069Sjilinxpd }
15334bff34e3Sthurlow 
153402d09e03SGordon Ross /* smbfsgetattr() in smbfs_client.c */
15354bff34e3Sthurlow 
15364bff34e3Sthurlow /*ARGSUSED4*/
15374bff34e3Sthurlow static int
15384bff34e3Sthurlow smbfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
15394bff34e3Sthurlow caller_context_t *ct)
15404bff34e3Sthurlow {
154102d09e03SGordon Ross vfs_t *vfsp;
154202d09e03SGordon Ross smbmntinfo_t *smi;
15434bff34e3Sthurlow int error;
15444bff34e3Sthurlow uint_t mask;
15454bff34e3Sthurlow struct vattr oldva;
15464bff34e3Sthurlow 
154702d09e03SGordon Ross vfsp = vp->v_vfsp;
154802d09e03SGordon Ross smi = VFTOSMI(vfsp);
15494bff34e3Sthurlow 
1550a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
15514bff34e3Sthurlow return (EIO);
15524bff34e3Sthurlow 
155302d09e03SGordon Ross if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
15544bff34e3Sthurlow return (EIO);
15554bff34e3Sthurlow 
15564bff34e3Sthurlow mask = vap->va_mask;
15574bff34e3Sthurlow if (mask & AT_NOSET)
15584bff34e3Sthurlow return (EINVAL);
15594bff34e3Sthurlow 
156002d09e03SGordon Ross if (vfsp->vfs_flag & VFS_RDONLY)
156102d09e03SGordon Ross return (EROFS);
156202d09e03SGordon Ross 
1563bd7c6f51SGordon Ross /*
1564bd7c6f51SGordon Ross * This is a _local_ access check so that only the owner of
1565bd7c6f51SGordon Ross * this mount can set attributes. With ACLs enabled, the
1566bd7c6f51SGordon Ross * file owner can be different from the mount owner, and we
1567bd7c6f51SGordon Ross * need to check the _mount_ owner here. See _access_rwx
1568bd7c6f51SGordon Ross */
156902d09e03SGordon Ross bzero(&oldva, sizeof (oldva));
1570bd7c6f51SGordon Ross oldva.va_mask = AT_TYPE | AT_MODE;
15714bff34e3Sthurlow error = smbfsgetattr(vp, &oldva, cr);
15724bff34e3Sthurlow if (error)
15734bff34e3Sthurlow return (error);
1574bd7c6f51SGordon Ross oldva.va_mask |= AT_UID | AT_GID;
1575bd7c6f51SGordon Ross oldva.va_uid = smi->smi_uid;
1576bd7c6f51SGordon Ross oldva.va_gid = smi->smi_gid;
15774bff34e3Sthurlow 
15784bff34e3Sthurlow error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
15794bff34e3Sthurlow smbfs_accessx, vp);
15804bff34e3Sthurlow if (error)
15814bff34e3Sthurlow return (error);
15824bff34e3Sthurlow 
1583bd7c6f51SGordon Ross if (mask & (AT_UID | AT_GID)) {
1584bd7c6f51SGordon Ross if (smi->smi_flags & SMI_ACL)
1585bd7c6f51SGordon Ross error = smbfs_acl_setids(vp, vap, cr);
1586bd7c6f51SGordon Ross else
1587bd7c6f51SGordon Ross error = ENOSYS;
1588bd7c6f51SGordon Ross if (error != 0) {
1589bd7c6f51SGordon Ross SMBVDEBUG("error %d setting UID/GID on %s",
1590bd7c6f51SGordon Ross error, VTOSMB(vp)->n_rpath);
1591bd7c6f51SGordon Ross /*
1592bd7c6f51SGordon Ross * It might be more correct to return the
1593bd7c6f51SGordon Ross * error here, but that causes complaints
1594bd7c6f51SGordon Ross * when root extracts a cpio archive, etc.
1595bd7c6f51SGordon Ross * So ignore this error, and go ahead with
1596bd7c6f51SGordon Ross * the rest of the setattr work.
1597bd7c6f51SGordon Ross */
1598bd7c6f51SGordon Ross }
1599bd7c6f51SGordon Ross }
1600bd7c6f51SGordon Ross 
16015f4fc069Sjilinxpd error = smbfssetattr(vp, vap, flags, cr);
16025f4fc069Sjilinxpd 
16035f4fc069Sjilinxpd #ifdef SMBFS_VNEVENT
16045f4fc069Sjilinxpd if (error == 0 && (vap->va_mask & AT_SIZE) && vap->va_size == 0)
16055f4fc069Sjilinxpd vnevent_truncate(vp, ct);
16065f4fc069Sjilinxpd #endif
16075f4fc069Sjilinxpd 
16085f4fc069Sjilinxpd return (error);
16094bff34e3Sthurlow }
16104bff34e3Sthurlow 
16114bff34e3Sthurlow /*
16124bff34e3Sthurlow * Mostly from Darwin smbfs_setattr()
16134bff34e3Sthurlow * but then modified a lot.
16144bff34e3Sthurlow */
16154bff34e3Sthurlow /* ARGSUSED */
16164bff34e3Sthurlow static int
16174bff34e3Sthurlow smbfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
16184bff34e3Sthurlow {
16194bff34e3Sthurlow int error = 0;
16204bff34e3Sthurlow smbnode_t *np = VTOSMB(vp);
16214bff34e3Sthurlow uint_t mask = vap->va_mask;
16224bff34e3Sthurlow struct timespec *mtime, *atime;
16234bff34e3Sthurlow struct smb_cred scred;
16244bff34e3Sthurlow int cerror, modified = 0;
16254bff34e3Sthurlow unsigned short fid;
16264bff34e3Sthurlow int have_fid = 0;
16274bff34e3Sthurlow uint32_t rights = 0;
162828162916SGordon Ross uint32_t dosattr = 0;
16294bff34e3Sthurlow 
1630a19609f8Sjv ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);
16314bff34e3Sthurlow 
163291d632c8Sgwr /*
163391d632c8Sgwr * There are no settable attributes on the XATTR dir,
163491d632c8Sgwr * so just silently ignore these. On XATTR files,
163591d632c8Sgwr * you can set the size but nothing else.
163691d632c8Sgwr */
163791d632c8Sgwr if (vp->v_flag & V_XATTRDIR)
163891d632c8Sgwr return (0);
163991d632c8Sgwr if (np->n_flag & N_XATTR) {
164091d632c8Sgwr if (mask & AT_TIMES)
164191d632c8Sgwr SMBVDEBUG("ignore set time on xattr\n");
164291d632c8Sgwr mask &= AT_SIZE;
164391d632c8Sgwr }
164491d632c8Sgwr 
16455f4fc069Sjilinxpd /*
16465f4fc069Sjilinxpd * Only need to flush pages if there are any pages and
16475f4fc069Sjilinxpd * if the file is marked as dirty in some fashion.
The 16485f4fc069Sjilinxpd * file must be flushed so that we can accurately 16495f4fc069Sjilinxpd * determine the size of the file and the cached data 16505f4fc069Sjilinxpd * after the SETATTR returns. A file is considered to 16515f4fc069Sjilinxpd * be dirty if it is either marked with RDIRTY, has 16525f4fc069Sjilinxpd * outstanding i/o's active, or is mmap'd. In this 16535f4fc069Sjilinxpd * last case, we can't tell whether there are dirty 16545f4fc069Sjilinxpd * pages, so we flush just to be sure. 16555f4fc069Sjilinxpd */ 16565f4fc069Sjilinxpd if (vn_has_cached_data(vp) && 16575f4fc069Sjilinxpd ((np->r_flags & RDIRTY) || 16585f4fc069Sjilinxpd np->r_count > 0 || 16595f4fc069Sjilinxpd np->r_mapcnt > 0)) { 16605f4fc069Sjilinxpd ASSERT(vp->v_type != VCHR); 16615f4fc069Sjilinxpd error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL); 16625f4fc069Sjilinxpd if (error && (error == ENOSPC || error == EDQUOT)) { 16635f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 16645f4fc069Sjilinxpd if (!np->r_error) 16655f4fc069Sjilinxpd np->r_error = error; 16665f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 16675f4fc069Sjilinxpd } 16685f4fc069Sjilinxpd } 16695f4fc069Sjilinxpd 16704bff34e3Sthurlow /* 16714bff34e3Sthurlow * If our caller is trying to set multiple attributes, they 16724bff34e3Sthurlow * can make no assumption about what order they are done in. 16734bff34e3Sthurlow * Here we try to do them in order of decreasing likelihood 16744bff34e3Sthurlow * of failure, just to minimize the chance we'll wind up 16754bff34e3Sthurlow * with a partially complete request. 16764bff34e3Sthurlow */ 16774bff34e3Sthurlow 16784bff34e3Sthurlow /* Shared lock for (possible) n_fid use. */ 16794bff34e3Sthurlow if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp))) 16804bff34e3Sthurlow return (EINTR); 1681613a2f6bSGordon Ross smb_credinit(&scred, cr); 16824bff34e3Sthurlow 168328162916SGordon Ross /* 168428162916SGordon Ross * If the caller has provided extensible attributes, 168528162916SGordon Ross * map those into DOS attributes supported by SMB. 168628162916SGordon Ross * Note: zero means "no change". 168728162916SGordon Ross */ 168828162916SGordon Ross if (mask & AT_XVATTR) 168928162916SGordon Ross dosattr = xvattr_to_dosattr(np, vap); 169028162916SGordon Ross 16914bff34e3Sthurlow /* 16924bff34e3Sthurlow * Will we need an open handle for this setattr? 16934bff34e3Sthurlow * If so, what rights will we need? 16944bff34e3Sthurlow */ 169528162916SGordon Ross if (dosattr || (mask & (AT_ATIME | AT_MTIME))) { 16964bff34e3Sthurlow rights |= 169702d09e03SGordon Ross SA_RIGHT_FILE_WRITE_ATTRIBUTES; 16984bff34e3Sthurlow } 16994bff34e3Sthurlow if (mask & AT_SIZE) { 17004bff34e3Sthurlow rights |= 17014bff34e3Sthurlow SA_RIGHT_FILE_WRITE_DATA | 17024bff34e3Sthurlow SA_RIGHT_FILE_APPEND_DATA; 170302d09e03SGordon Ross } 170402d09e03SGordon Ross 170502d09e03SGordon Ross /* 170602d09e03SGordon Ross * Only SIZE really requires a handle, but it's 170702d09e03SGordon Ross * simpler and more reliable to set via a handle. 170802d09e03SGordon Ross * Some servers like NT4 won't set times by path. 170902d09e03SGordon Ross * Also, we're usually setting everything anyway. 
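 * If "rights" is still zero at this point, nothing is being
 * changed, so we skip the open (and the set-attribute calls
 * below it) entirely.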
171002d09e03SGordon Ross */ 171128162916SGordon Ross if (rights != 0) { 17124bff34e3Sthurlow error = smbfs_smb_tmpopen(np, rights, &scred, &fid); 17134bff34e3Sthurlow if (error) { 17144bff34e3Sthurlow SMBVDEBUG("error %d opening %s\n", 17154bff34e3Sthurlow error, np->n_rpath); 17164bff34e3Sthurlow goto out; 17174bff34e3Sthurlow } 17184bff34e3Sthurlow have_fid = 1; 17194bff34e3Sthurlow } 17204bff34e3Sthurlow 17214bff34e3Sthurlow /* 17224bff34e3Sthurlow * If the server supports the UNIX extensions, right here is where 17234bff34e3Sthurlow * we'd support changes to uid, gid, mode, and possibly va_flags. 17244bff34e3Sthurlow * For now we claim to have made any such changes. 17254bff34e3Sthurlow */ 17264bff34e3Sthurlow 17274bff34e3Sthurlow if (mask & AT_SIZE) { 17284bff34e3Sthurlow /* 17294bff34e3Sthurlow * If the new file size is less than what the client sees as 17304bff34e3Sthurlow * the file size, then just change the size and invalidate 17314bff34e3Sthurlow * the pages. 17324bff34e3Sthurlow */ 17334bff34e3Sthurlow 17344bff34e3Sthurlow /* 17354bff34e3Sthurlow * Set the file size to vap->va_size. 17364bff34e3Sthurlow */ 17374bff34e3Sthurlow ASSERT(have_fid); 17384bff34e3Sthurlow error = smbfs_smb_setfsize(np, fid, vap->va_size, &scred); 17394bff34e3Sthurlow if (error) { 17404bff34e3Sthurlow SMBVDEBUG("setsize error %d file %s\n", 17414bff34e3Sthurlow error, np->n_rpath); 17424bff34e3Sthurlow } else { 17434bff34e3Sthurlow /* 17444bff34e3Sthurlow * Darwin had code here to zero-extend. 17454bff34e3Sthurlow * Tests indicate the server will zero-fill, 17465f4fc069Sjilinxpd * so looks like we don't need to do that. 17474bff34e3Sthurlow */ 17484bff34e3Sthurlow mutex_enter(&np->r_statelock); 17494bff34e3Sthurlow np->r_size = vap->va_size; 17504bff34e3Sthurlow mutex_exit(&np->r_statelock); 17514bff34e3Sthurlow modified = 1; 17524bff34e3Sthurlow } 17534bff34e3Sthurlow } 17544bff34e3Sthurlow 17554bff34e3Sthurlow /* 17565f4fc069Sjilinxpd * Todo: Implement setting create_time (which is 17575f4fc069Sjilinxpd * different from ctime). 17584bff34e3Sthurlow */ 17594bff34e3Sthurlow mtime = ((mask & AT_MTIME) ? &vap->va_mtime : 0); 17604bff34e3Sthurlow atime = ((mask & AT_ATIME) ? &vap->va_atime : 0); 17614bff34e3Sthurlow 176228162916SGordon Ross if (dosattr || mtime || atime) { 17634bff34e3Sthurlow /* 176402d09e03SGordon Ross * Always use the handle-based set attr call now. 17654bff34e3Sthurlow */ 176602d09e03SGordon Ross ASSERT(have_fid); 176702d09e03SGordon Ross error = smbfs_smb_setfattr(np, fid, 176828162916SGordon Ross dosattr, mtime, atime, &scred); 17694bff34e3Sthurlow if (error) { 17704bff34e3Sthurlow SMBVDEBUG("set times error %d file %s\n", 17714bff34e3Sthurlow error, np->n_rpath); 17724bff34e3Sthurlow } else { 17734bff34e3Sthurlow modified = 1; 17744bff34e3Sthurlow } 17754bff34e3Sthurlow } 17764bff34e3Sthurlow 17774bff34e3Sthurlow out: 17784bff34e3Sthurlow if (have_fid) { 17794bff34e3Sthurlow cerror = smbfs_smb_tmpclose(np, fid, &scred); 17804bff34e3Sthurlow if (cerror) 178102d09e03SGordon Ross SMBVDEBUG("error %d closing %s\n", 17824bff34e3Sthurlow cerror, np->n_rpath); 17834bff34e3Sthurlow } 17844bff34e3Sthurlow 17854bff34e3Sthurlow smb_credrele(&scred); 17864bff34e3Sthurlow smbfs_rw_exit(&np->r_lkserlock); 17874bff34e3Sthurlow 17885f4fc069Sjilinxpd if (modified) { 17895f4fc069Sjilinxpd /* 17905f4fc069Sjilinxpd * Invalidate attribute cache in case the server 17915f4fc069Sjilinxpd * doesn't set exactly the attributes we asked. 
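 * (For example, a server may store timestamps at a coarser
 * granularity than the values we sent.)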
17925f4fc069Sjilinxpd */ 17935f4fc069Sjilinxpd smbfs_attrcache_remove(np); 17945f4fc069Sjilinxpd 17955f4fc069Sjilinxpd /* 17965f4fc069Sjilinxpd * If changing the size of the file, invalidate 17975f4fc069Sjilinxpd * any local cached data which is no longer part 17985f4fc069Sjilinxpd * of the file. We also possibly invalidate the 17995f4fc069Sjilinxpd * last page in the file. We could use 18005f4fc069Sjilinxpd * pvn_vpzero(), but this would mark the page as 18015f4fc069Sjilinxpd * modified and require it to be written back to 18025f4fc069Sjilinxpd * the server for no particularly good reason. 18035f4fc069Sjilinxpd * This way, if we access it, then we bring it 18045f4fc069Sjilinxpd * back in. A read should be cheaper than a 18055f4fc069Sjilinxpd * write. 18065f4fc069Sjilinxpd */ 18075f4fc069Sjilinxpd if (mask & AT_SIZE) { 18085f4fc069Sjilinxpd smbfs_invalidate_pages(vp, 18095f4fc069Sjilinxpd (vap->va_size & PAGEMASK), cr); 18105f4fc069Sjilinxpd } 18115f4fc069Sjilinxpd } 18125f4fc069Sjilinxpd 18134bff34e3Sthurlow return (error); 18144bff34e3Sthurlow } 18154bff34e3Sthurlow 181628162916SGordon Ross /* 181728162916SGordon Ross * Helper function for extensible system attributes (PSARC 2007/315) 181828162916SGordon Ross * Compute the DOS attribute word to pass to _setfattr (see above). 181928162916SGordon Ross * This returns zero IFF no change is being made to attributes. 182028162916SGordon Ross * Otherwise return the new attributes or SMB_EFA_NORMAL. 182128162916SGordon Ross */ 182228162916SGordon Ross static uint32_t 182328162916SGordon Ross xvattr_to_dosattr(smbnode_t *np, struct vattr *vap) 182428162916SGordon Ross { 182528162916SGordon Ross xvattr_t *xvap = (xvattr_t *)vap; 182628162916SGordon Ross xoptattr_t *xoap = NULL; 182728162916SGordon Ross uint32_t attr = np->r_attr.fa_attr; 182828162916SGordon Ross boolean_t anyset = B_FALSE; 182928162916SGordon Ross 183028162916SGordon Ross if ((xoap = xva_getxoptattr(xvap)) == NULL) 183128162916SGordon Ross return (0); 183228162916SGordon Ross 183328162916SGordon Ross if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { 183428162916SGordon Ross if (xoap->xoa_archive) 183528162916SGordon Ross attr |= SMB_FA_ARCHIVE; 183628162916SGordon Ross else 183728162916SGordon Ross attr &= ~SMB_FA_ARCHIVE; 183828162916SGordon Ross XVA_SET_RTN(xvap, XAT_ARCHIVE); 183928162916SGordon Ross anyset = B_TRUE; 184028162916SGordon Ross } 184128162916SGordon Ross if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { 184228162916SGordon Ross if (xoap->xoa_system) 184328162916SGordon Ross attr |= SMB_FA_SYSTEM; 184428162916SGordon Ross else 184528162916SGordon Ross attr &= ~SMB_FA_SYSTEM; 184628162916SGordon Ross XVA_SET_RTN(xvap, XAT_SYSTEM); 184728162916SGordon Ross anyset = B_TRUE; 184828162916SGordon Ross } 184928162916SGordon Ross if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { 185028162916SGordon Ross if (xoap->xoa_readonly) 185128162916SGordon Ross attr |= SMB_FA_RDONLY; 185228162916SGordon Ross else 185328162916SGordon Ross attr &= ~SMB_FA_RDONLY; 185428162916SGordon Ross XVA_SET_RTN(xvap, XAT_READONLY); 185528162916SGordon Ross anyset = B_TRUE; 185628162916SGordon Ross } 185728162916SGordon Ross if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { 185828162916SGordon Ross if (xoap->xoa_hidden) 185928162916SGordon Ross attr |= SMB_FA_HIDDEN; 186028162916SGordon Ross else 186128162916SGordon Ross attr &= ~SMB_FA_HIDDEN; 186228162916SGordon Ross XVA_SET_RTN(xvap, XAT_HIDDEN); 186328162916SGordon Ross anyset = B_TRUE; 186428162916SGordon Ross } 186528162916SGordon Ross 186628162916SGordon Ross if (anyset == 
B_FALSE) 186728162916SGordon Ross return (0); /* no change */ 186828162916SGordon Ross if (attr == 0) 186928162916SGordon Ross attr = SMB_EFA_NORMAL; 187028162916SGordon Ross 187128162916SGordon Ross return (attr); 187228162916SGordon Ross } 187328162916SGordon Ross 18744bff34e3Sthurlow /* 18754bff34e3Sthurlow * smbfs_access_rwx() 18764bff34e3Sthurlow * Common function for smbfs_access, etc. 18774bff34e3Sthurlow * 18784bff34e3Sthurlow * The security model implemented by the FS is unusual 1879bd7c6f51SGordon Ross * due to the current "single user mounts" restriction: 18804bff34e3Sthurlow * All access under a given mount point uses the CIFS 18814bff34e3Sthurlow * credentials established by the owner of the mount. 18824bff34e3Sthurlow * 18834bff34e3Sthurlow * Most access checking is handled by the CIFS server, 18844bff34e3Sthurlow * but we need sufficient Unix access checks here to 18854bff34e3Sthurlow * prevent other local Unix users from having access 18864bff34e3Sthurlow * to objects under this mount that the uid/gid/mode 18874bff34e3Sthurlow * settings in the mount would not allow. 18884bff34e3Sthurlow * 18894bff34e3Sthurlow * With this model, there is a case where we need the 18904bff34e3Sthurlow * ability to do an access check before we have the 18914bff34e3Sthurlow * vnode for an object. This function takes advantage 18924bff34e3Sthurlow * of the fact that the uid/gid/mode is per mount, and 18934bff34e3Sthurlow * avoids the need for a vnode. 18944bff34e3Sthurlow * 18954bff34e3Sthurlow * We still (sort of) need a vnode when we call 18964bff34e3Sthurlow * secpolicy_vnode_access, but that only uses 18974bff34e3Sthurlow * the vtype field, so we can use a pair of fake 18984bff34e3Sthurlow * vnodes that have only v_type filled in. 18994bff34e3Sthurlow */ 19004bff34e3Sthurlow static int 19014bff34e3Sthurlow smbfs_access_rwx(vfs_t *vfsp, int vtype, int mode, cred_t *cr) 19024bff34e3Sthurlow { 19034bff34e3Sthurlow /* See the secpolicy call below. */ 19044bff34e3Sthurlow static const vnode_t tmpl_vdir = { .v_type = VDIR }; 19054bff34e3Sthurlow static const vnode_t tmpl_vreg = { .v_type = VREG }; 19064bff34e3Sthurlow vattr_t va; 19074bff34e3Sthurlow vnode_t *tvp; 19084bff34e3Sthurlow struct smbmntinfo *smi = VFTOSMI(vfsp); 19094bff34e3Sthurlow int shift = 0; 19104bff34e3Sthurlow 19114bff34e3Sthurlow /* 19124bff34e3Sthurlow * Build our (fabricated) vnode attributes. 19134bff34e3Sthurlow */ 19144bff34e3Sthurlow bzero(&va, sizeof (va)); 19154bff34e3Sthurlow va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID; 19164bff34e3Sthurlow va.va_type = vtype; 19174bff34e3Sthurlow va.va_mode = (vtype == VDIR) ? 191802d09e03SGordon Ross smi->smi_dmode : smi->smi_fmode; 191902d09e03SGordon Ross va.va_uid = smi->smi_uid; 192002d09e03SGordon Ross va.va_gid = smi->smi_gid; 19214bff34e3Sthurlow 19224bff34e3Sthurlow /* 19234bff34e3Sthurlow * Disallow write attempts on read-only file systems, 19244bff34e3Sthurlow * unless the file is a device or fifo node. Note: 19254bff34e3Sthurlow * Inline vn_is_readonly and IS_DEVVP here because 19264bff34e3Sthurlow * we may not have a vnode ptr. Original expr. 
was: 19274bff34e3Sthurlow * (mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp)) 19284bff34e3Sthurlow */ 19294bff34e3Sthurlow if ((mode & VWRITE) && 19304bff34e3Sthurlow (vfsp->vfs_flag & VFS_RDONLY) && 19314bff34e3Sthurlow !(vtype == VCHR || vtype == VBLK || vtype == VFIFO)) 19324bff34e3Sthurlow return (EROFS); 19334bff34e3Sthurlow 19344bff34e3Sthurlow /* 19354bff34e3Sthurlow * Disallow attempts to access mandatory lock files. 19364bff34e3Sthurlow * Similarly, expand MANDLOCK here. 19374bff34e3Sthurlow */ 19384bff34e3Sthurlow if ((mode & (VWRITE | VREAD | VEXEC)) && 19394bff34e3Sthurlow va.va_type == VREG && MANDMODE(va.va_mode)) 19404bff34e3Sthurlow return (EACCES); 19414bff34e3Sthurlow 19424bff34e3Sthurlow /* 19434bff34e3Sthurlow * Access check is based on only 19444bff34e3Sthurlow * one of owner, group, public. 19454bff34e3Sthurlow * If not owner, then check group. 19464bff34e3Sthurlow * If not a member of the group, 19474bff34e3Sthurlow * then check public access. 19484bff34e3Sthurlow */ 19494bff34e3Sthurlow if (crgetuid(cr) != va.va_uid) { 19504bff34e3Sthurlow shift += 3; 19514bff34e3Sthurlow if (!groupmember(va.va_gid, cr)) 19524bff34e3Sthurlow shift += 3; 19534bff34e3Sthurlow } 19544bff34e3Sthurlow 19554bff34e3Sthurlow /* 19564bff34e3Sthurlow * We need a vnode for secpolicy_vnode_access, 19574bff34e3Sthurlow * but the only thing it looks at is v_type, 19584bff34e3Sthurlow * so pass one of the templates above. 19594bff34e3Sthurlow */ 19604bff34e3Sthurlow tvp = (va.va_type == VDIR) ? 19614bff34e3Sthurlow (vnode_t *)&tmpl_vdir : 19624bff34e3Sthurlow (vnode_t *)&tmpl_vreg; 1963134a1f4eSCasper H.S. Dik 1964134a1f4eSCasper H.S. Dik return (secpolicy_vnode_access2(cr, tvp, va.va_uid, 1965134a1f4eSCasper H.S. Dik va.va_mode << shift, mode)); 19664bff34e3Sthurlow } 19674bff34e3Sthurlow 19684bff34e3Sthurlow /* 19694bff34e3Sthurlow * See smbfs_setattr 19704bff34e3Sthurlow */ 19714bff34e3Sthurlow static int 19724bff34e3Sthurlow smbfs_accessx(void *arg, int mode, cred_t *cr) 19734bff34e3Sthurlow { 19744bff34e3Sthurlow vnode_t *vp = arg; 19754bff34e3Sthurlow /* 19764bff34e3Sthurlow * Note: The caller has checked the current zone, 19774bff34e3Sthurlow * the SMI_DEAD and VFS_UNMOUNTED flags, etc. 
19784bff34e3Sthurlow */ 19794bff34e3Sthurlow return (smbfs_access_rwx(vp->v_vfsp, vp->v_type, mode, cr)); 19804bff34e3Sthurlow } 19814bff34e3Sthurlow 19824bff34e3Sthurlow /* 19834bff34e3Sthurlow * XXX 19844bff34e3Sthurlow * This op should support PSARC 2007/403, Modified Access Checks for CIFS 19854bff34e3Sthurlow */ 19864bff34e3Sthurlow /* ARGSUSED */ 19874bff34e3Sthurlow static int 19884bff34e3Sthurlow smbfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct) 19894bff34e3Sthurlow { 19904bff34e3Sthurlow vfs_t *vfsp; 19914bff34e3Sthurlow smbmntinfo_t *smi; 19924bff34e3Sthurlow 19934bff34e3Sthurlow vfsp = vp->v_vfsp; 19944bff34e3Sthurlow smi = VFTOSMI(vfsp); 19954bff34e3Sthurlow 1996a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 19974bff34e3Sthurlow return (EIO); 19984bff34e3Sthurlow 19994bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED) 20004bff34e3Sthurlow return (EIO); 20014bff34e3Sthurlow 20024bff34e3Sthurlow return (smbfs_access_rwx(vfsp, vp->v_type, mode, cr)); 20034bff34e3Sthurlow } 20044bff34e3Sthurlow 20054bff34e3Sthurlow 20065f4fc069Sjilinxpd /* ARGSUSED */ 20075f4fc069Sjilinxpd static int 20085f4fc069Sjilinxpd smbfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct) 20095f4fc069Sjilinxpd { 20105f4fc069Sjilinxpd /* Not yet... */ 20115f4fc069Sjilinxpd return (ENOSYS); 20125f4fc069Sjilinxpd } 20135f4fc069Sjilinxpd 20145f4fc069Sjilinxpd 20154bff34e3Sthurlow /* 20164bff34e3Sthurlow * Flush local dirty pages to stable storage on the server. 20174bff34e3Sthurlow * 20184bff34e3Sthurlow * If FNODSYNC is specified, then there is nothing to do because 20194bff34e3Sthurlow * metadata changes are not cached on the client before being 20204bff34e3Sthurlow * sent to the server. 
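 *
 * Otherwise, for FSYNC or FDSYNC, dirty pages are pushed out via
 * smbfs_putpage and the server is then asked to flush the file
 * over the wire (smbfs_smb_flush).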
20214bff34e3Sthurlow */ 20224bff34e3Sthurlow /* ARGSUSED */ 20234bff34e3Sthurlow static int 20244bff34e3Sthurlow smbfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) 20254bff34e3Sthurlow { 20264bff34e3Sthurlow int error = 0; 20274bff34e3Sthurlow smbmntinfo_t *smi; 20285f4fc069Sjilinxpd smbnode_t *np; 20292f5e3e91SGordon Ross struct smb_cred scred; 20304bff34e3Sthurlow 20312f5e3e91SGordon Ross np = VTOSMB(vp); 20324bff34e3Sthurlow smi = VTOSMI(vp); 20334bff34e3Sthurlow 2034a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 20354bff34e3Sthurlow return (EIO); 20364bff34e3Sthurlow 20374bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 20384bff34e3Sthurlow return (EIO); 20394bff34e3Sthurlow 20404bff34e3Sthurlow if ((syncflag & FNODSYNC) || IS_SWAPVP(vp)) 20414bff34e3Sthurlow return (0); 20424bff34e3Sthurlow 20432f5e3e91SGordon Ross if ((syncflag & (FSYNC|FDSYNC)) == 0) 20442f5e3e91SGordon Ross return (0); 20452f5e3e91SGordon Ross 20465f4fc069Sjilinxpd error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct); 20475f4fc069Sjilinxpd if (error) 20485f4fc069Sjilinxpd return (error); 20495f4fc069Sjilinxpd 20502f5e3e91SGordon Ross /* Shared lock for n_fid use in _flush */ 20512f5e3e91SGordon Ross if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp))) 20522f5e3e91SGordon Ross return (EINTR); 2053613a2f6bSGordon Ross smb_credinit(&scred, cr); 20542f5e3e91SGordon Ross 20552f5e3e91SGordon Ross error = smbfs_smb_flush(np, &scred); 20562f5e3e91SGordon Ross 20572f5e3e91SGordon Ross smb_credrele(&scred); 20582f5e3e91SGordon Ross smbfs_rw_exit(&np->r_lkserlock); 20592f5e3e91SGordon Ross 20604bff34e3Sthurlow return (error); 20614bff34e3Sthurlow } 20624bff34e3Sthurlow 20634bff34e3Sthurlow /* 20644bff34e3Sthurlow * Last reference to vnode went away. 20654bff34e3Sthurlow */ 20664bff34e3Sthurlow /* ARGSUSED */ 20674bff34e3Sthurlow static void 20684bff34e3Sthurlow smbfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) 20694bff34e3Sthurlow { 207042d15982SGordon Ross struct smb_cred scred; 20715f4fc069Sjilinxpd smbnode_t *np = VTOSMB(vp); 20725f4fc069Sjilinxpd int error; 20734bff34e3Sthurlow 20744bff34e3Sthurlow /* 20754bff34e3Sthurlow * Don't "bail out" for VFS_UNMOUNTED here, 20764bff34e3Sthurlow * as we want to do cleanup, etc. 20774bff34e3Sthurlow * See also pcfs_inactive 20784bff34e3Sthurlow */ 20794bff34e3Sthurlow 20804bff34e3Sthurlow /* 20814bff34e3Sthurlow * If this is coming from the wrong zone, we let someone in the right 20824bff34e3Sthurlow * zone take care of it asynchronously. We can get here due to 20834bff34e3Sthurlow * VN_RELE() being called from pageout() or fsflush(). This call may 20844bff34e3Sthurlow * potentially turn into an expensive no-op if, for instance, v_count 20854bff34e3Sthurlow * gets incremented in the meantime, but it's still correct. 20864bff34e3Sthurlow */ 20874bff34e3Sthurlow 20885f4fc069Sjilinxpd /* 20895f4fc069Sjilinxpd * From NFS:rinactive() 20905f4fc069Sjilinxpd * 20915f4fc069Sjilinxpd * Before freeing anything, wait until all asynchronous 20925f4fc069Sjilinxpd * activity is done on this rnode. This will allow all 20935f4fc069Sjilinxpd * asynchronous read ahead and write behind i/o's to 20945f4fc069Sjilinxpd * finish. 
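 * (r_count tracks those outstanding requests; the code that drops
 * it back to zero is expected to broadcast r_cv, which is what we
 * wait on here.)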
20955f4fc069Sjilinxpd */ 20965f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 20975f4fc069Sjilinxpd while (np->r_count > 0) 20985f4fc069Sjilinxpd cv_wait(&np->r_cv, &np->r_statelock); 20995f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 21005f4fc069Sjilinxpd 21015f4fc069Sjilinxpd /* 21025f4fc069Sjilinxpd * Flush and invalidate all pages associated with the vnode. 21035f4fc069Sjilinxpd */ 21045f4fc069Sjilinxpd if (vn_has_cached_data(vp)) { 21055f4fc069Sjilinxpd if ((np->r_flags & RDIRTY) && !np->r_error) { 21065f4fc069Sjilinxpd error = smbfs_putpage(vp, (u_offset_t)0, 0, 0, cr, ct); 21075f4fc069Sjilinxpd if (error && (error == ENOSPC || error == EDQUOT)) { 21085f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 21095f4fc069Sjilinxpd if (!np->r_error) 21105f4fc069Sjilinxpd np->r_error = error; 21115f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 21125f4fc069Sjilinxpd } 21135f4fc069Sjilinxpd } 21145f4fc069Sjilinxpd smbfs_invalidate_pages(vp, (u_offset_t)0, cr); 21155f4fc069Sjilinxpd } 21165f4fc069Sjilinxpd /* 21175f4fc069Sjilinxpd * This vnode should have lost all cached data. 21185f4fc069Sjilinxpd */ 21195f4fc069Sjilinxpd ASSERT(vn_has_cached_data(vp) == 0); 21205f4fc069Sjilinxpd 21214bff34e3Sthurlow /* 212242d15982SGordon Ross * Defend against the possibility that higher-level callers 212342d15982SGordon Ross * might not correctly balance open and close calls. If we 212442d15982SGordon Ross * get here with open references remaining, it means there 212542d15982SGordon Ross * was a missing VOP_CLOSE somewhere. If that happens, do 212642d15982SGordon Ross * the close here so we don't "leak" FIDs on the server. 21274bff34e3Sthurlow * 212842d15982SGordon Ross * Exclusive lock for modifying n_fid stuff. 212942d15982SGordon Ross * Don't want this one ever interruptible. 21304bff34e3Sthurlow */ 213142d15982SGordon Ross (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0); 213242d15982SGordon Ross smb_credinit(&scred, cr); 213342d15982SGordon Ross 213442d15982SGordon Ross switch (np->n_ovtype) { 213542d15982SGordon Ross case VNON: 213642d15982SGordon Ross /* not open (OK) */ 213742d15982SGordon Ross break; 213842d15982SGordon Ross 213942d15982SGordon Ross case VDIR: 214042d15982SGordon Ross if (np->n_dirrefs == 0) 214142d15982SGordon Ross break; 214242d15982SGordon Ross SMBVDEBUG("open dir: refs %d path %s\n", 214342d15982SGordon Ross np->n_dirrefs, np->n_rpath); 214442d15982SGordon Ross /* Force last close. */ 214542d15982SGordon Ross np->n_dirrefs = 1; 214642d15982SGordon Ross smbfs_rele_fid(np, &scred); 214742d15982SGordon Ross break; 214842d15982SGordon Ross 214942d15982SGordon Ross case VREG: 215042d15982SGordon Ross if (np->n_fidrefs == 0) 215142d15982SGordon Ross break; 215242d15982SGordon Ross SMBVDEBUG("open file: refs %d id 0x%x path %s\n", 21534bff34e3Sthurlow np->n_fidrefs, np->n_fid, np->n_rpath); 215442d15982SGordon Ross /* Force last close. 
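 * Setting the refcount to 1 makes smbfs_rele_fid below drop
 * the last reference and close the FID on the server.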
*/ 215542d15982SGordon Ross np->n_fidrefs = 1; 215642d15982SGordon Ross smbfs_rele_fid(np, &scred); 215742d15982SGordon Ross break; 215842d15982SGordon Ross 215942d15982SGordon Ross default: 216042d15982SGordon Ross SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype); 216142d15982SGordon Ross np->n_ovtype = VNON; 216242d15982SGordon Ross break; 21634bff34e3Sthurlow } 216442d15982SGordon Ross 216542d15982SGordon Ross smb_credrele(&scred); 216642d15982SGordon Ross smbfs_rw_exit(&np->r_lkserlock); 21674bff34e3Sthurlow 2168ff1e230cSjilinxpd /* 2169ff1e230cSjilinxpd * XATTR directories (and the files under them) have 2170ff1e230cSjilinxpd * little value for reclaim, so just remove them from 2171ff1e230cSjilinxpd * the "hash" (AVL) as soon as they go inactive. 2172ff1e230cSjilinxpd * Note that the node may already have been removed 2173ff1e230cSjilinxpd * from the hash by smbfsremove. 2174ff1e230cSjilinxpd */ 2175ff1e230cSjilinxpd if ((np->n_flag & N_XATTR) != 0 && 2176ff1e230cSjilinxpd (np->r_flags & RHASHED) != 0) 2177ff1e230cSjilinxpd smbfs_rmhash(np); 2178ff1e230cSjilinxpd 217902d09e03SGordon Ross smbfs_addfree(np); 21804bff34e3Sthurlow } 21814bff34e3Sthurlow 21824bff34e3Sthurlow /* 21834bff34e3Sthurlow * Remote file system operations having to do with directory manipulation. 21844bff34e3Sthurlow */ 21854bff34e3Sthurlow /* ARGSUSED */ 21864bff34e3Sthurlow static int 21874bff34e3Sthurlow smbfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, 21884bff34e3Sthurlow int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, 21894bff34e3Sthurlow int *direntflags, pathname_t *realpnp) 21904bff34e3Sthurlow { 219191d632c8Sgwr vfs_t *vfs; 21924bff34e3Sthurlow smbmntinfo_t *smi; 219391d632c8Sgwr smbnode_t *dnp; 219491d632c8Sgwr int error; 21954bff34e3Sthurlow 219691d632c8Sgwr vfs = dvp->v_vfsp; 219791d632c8Sgwr smi = VFTOSMI(vfs); 21984bff34e3Sthurlow 2199a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 22004bff34e3Sthurlow return (EPERM); 22014bff34e3Sthurlow 220291d632c8Sgwr if (smi->smi_flags & SMI_DEAD || vfs->vfs_flag & VFS_UNMOUNTED) 22034bff34e3Sthurlow return (EIO); 22044bff34e3Sthurlow 22054bff34e3Sthurlow dnp = VTOSMB(dvp); 220691d632c8Sgwr 220791d632c8Sgwr /* 220891d632c8Sgwr * Are we looking up extended attributes? If so, "dvp" is 220991d632c8Sgwr * the file or directory for which we want attributes, and 221091d632c8Sgwr * we need a lookup of the (faked up) attribute directory 221191d632c8Sgwr * before we lookup the rest of the path. 221291d632c8Sgwr */ 221391d632c8Sgwr if (flags & LOOKUP_XATTR) { 221491d632c8Sgwr /* 221591d632c8Sgwr * Require the xattr mount option. 
221691d632c8Sgwr */ 221791d632c8Sgwr if ((vfs->vfs_flag & VFS_XATTR) == 0) 221891d632c8Sgwr return (EINVAL); 221991d632c8Sgwr 222091d632c8Sgwr error = smbfs_get_xattrdir(dvp, vpp, cr, flags); 222191d632c8Sgwr return (error); 222291d632c8Sgwr } 222391d632c8Sgwr 222402d09e03SGordon Ross if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_READER, SMBINTR(dvp))) 222502d09e03SGordon Ross return (EINTR); 22264bff34e3Sthurlow 22274bff34e3Sthurlow error = smbfslookup(dvp, nm, vpp, cr, 1, ct); 22284bff34e3Sthurlow 22294bff34e3Sthurlow smbfs_rw_exit(&dnp->r_rwlock); 22304bff34e3Sthurlow 22314bff34e3Sthurlow return (error); 22324bff34e3Sthurlow } 22334bff34e3Sthurlow 22344bff34e3Sthurlow /* ARGSUSED */ 22354bff34e3Sthurlow static int 223602d09e03SGordon Ross smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr, 223702d09e03SGordon Ross int cache_ok, caller_context_t *ct) 22384bff34e3Sthurlow { 22394bff34e3Sthurlow int error; 22404bff34e3Sthurlow int supplen; /* supported length */ 22414bff34e3Sthurlow vnode_t *vp; 224202d09e03SGordon Ross smbnode_t *np; 22434bff34e3Sthurlow smbnode_t *dnp; 22444bff34e3Sthurlow smbmntinfo_t *smi; 22454bff34e3Sthurlow /* struct smb_vc *vcp; */ 224691d632c8Sgwr const char *ill; 22474bff34e3Sthurlow const char *name = (const char *)nm; 22485f4fc069Sjilinxpd int nmlen = strlen(nm); 22495f4fc069Sjilinxpd int rplen; 22504bff34e3Sthurlow struct smb_cred scred; 22514bff34e3Sthurlow struct smbfattr fa; 22524bff34e3Sthurlow 22534bff34e3Sthurlow smi = VTOSMI(dvp); 22544bff34e3Sthurlow dnp = VTOSMB(dvp); 22554bff34e3Sthurlow 2256a19609f8Sjv ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone); 22574bff34e3Sthurlow 22584bff34e3Sthurlow #ifdef NOT_YET 22594bff34e3Sthurlow vcp = SSTOVC(smi->smi_share); 22604bff34e3Sthurlow 22614bff34e3Sthurlow /* XXX: Should compute this once and store it in smbmntinfo_t */ 22624bff34e3Sthurlow supplen = (SMB_DIALECT(vcp) >= SMB_DIALECT_LANMAN2_0) ? 255 : 12; 22634bff34e3Sthurlow #else 22644bff34e3Sthurlow supplen = 255; 22654bff34e3Sthurlow #endif 22664bff34e3Sthurlow 22674bff34e3Sthurlow /* 22684bff34e3Sthurlow * RWlock must be held, either reader or writer. 22694bff34e3Sthurlow */ 22704bff34e3Sthurlow ASSERT(dnp->r_rwlock.count != 0); 22714bff34e3Sthurlow 22724bff34e3Sthurlow /* 227302d09e03SGordon Ross * If lookup is for "", just return dvp. 227402d09e03SGordon Ross * No need to perform any access checks. 22754bff34e3Sthurlow */ 22764bff34e3Sthurlow if (nmlen == 0) { 22774bff34e3Sthurlow VN_HOLD(dvp); 22784bff34e3Sthurlow *vpp = dvp; 22794bff34e3Sthurlow return (0); 22804bff34e3Sthurlow } 22814bff34e3Sthurlow 22824bff34e3Sthurlow /* 228391d632c8Sgwr * Can't do lookups in non-directories. 22844bff34e3Sthurlow */ 22854bff34e3Sthurlow if (dvp->v_type != VDIR) 22864bff34e3Sthurlow return (ENOTDIR); 22874bff34e3Sthurlow 228891d632c8Sgwr /* 228991d632c8Sgwr * Need search permission in the directory. 229091d632c8Sgwr */ 22914bff34e3Sthurlow error = smbfs_access(dvp, VEXEC, 0, cr, ct); 22924bff34e3Sthurlow if (error) 22934bff34e3Sthurlow return (error); 22944bff34e3Sthurlow 22954bff34e3Sthurlow /* 229602d09e03SGordon Ross * If lookup is for ".", just return dvp. 229702d09e03SGordon Ross * Access check was done above. 22984bff34e3Sthurlow */ 22994bff34e3Sthurlow if (nmlen == 1 && name[0] == '.') { 23004bff34e3Sthurlow VN_HOLD(dvp); 23014bff34e3Sthurlow *vpp = dvp; 23024bff34e3Sthurlow return (0); 23034bff34e3Sthurlow } 23044bff34e3Sthurlow 23054bff34e3Sthurlow /* 230691d632c8Sgwr * Now some sanity checks on the name. 
230791d632c8Sgwr * First check the length. 23084bff34e3Sthurlow */ 230991d632c8Sgwr if (nmlen > supplen) 231091d632c8Sgwr return (ENAMETOOLONG); 231191d632c8Sgwr 231291d632c8Sgwr /* 231391d632c8Sgwr * Avoid surprises with characters that are 231491d632c8Sgwr * illegal in Windows file names. 23155f4fc069Sjilinxpd * Todo: CATIA mappings? 231691d632c8Sgwr */ 231791d632c8Sgwr ill = illegal_chars; 231891d632c8Sgwr if (dnp->n_flag & N_XATTR) 231991d632c8Sgwr ill++; /* allow colon */ 232091d632c8Sgwr if (strpbrk(nm, ill)) 232191d632c8Sgwr return (EINVAL); 232291d632c8Sgwr 23234bff34e3Sthurlow /* 232402d09e03SGordon Ross * Special handling for lookup of ".." 23254bff34e3Sthurlow * 23264bff34e3Sthurlow * We keep full pathnames (as seen on the server) 23274bff34e3Sthurlow * so we can just trim off the last component to 23284bff34e3Sthurlow * get the full pathname of the parent. Note: 23294bff34e3Sthurlow * We don't actually copy and modify, but just 23304bff34e3Sthurlow * compute the trimmed length and pass that with 23314bff34e3Sthurlow * the current dir path (not null terminated). 23324bff34e3Sthurlow * 23334bff34e3Sthurlow * We don't go over-the-wire to get attributes 23344bff34e3Sthurlow * for ".." because we know it's a directory, 23354bff34e3Sthurlow * and we can just leave the rest "stale" 23364bff34e3Sthurlow * until someone does a getattr. 23374bff34e3Sthurlow */ 23384bff34e3Sthurlow if (nmlen == 2 && name[0] == '.' && name[1] == '.') { 23394bff34e3Sthurlow if (dvp->v_flag & VROOT) { 23404bff34e3Sthurlow /* 23414bff34e3Sthurlow * Already at the root. This can happen 23424bff34e3Sthurlow * with directory listings at the root, 23434bff34e3Sthurlow * which lookup "." and ".." to get the 23444bff34e3Sthurlow * inode numbers. Let ".." be the same 23454bff34e3Sthurlow * as "." in the FS root. 23464bff34e3Sthurlow */ 23474bff34e3Sthurlow VN_HOLD(dvp); 23484bff34e3Sthurlow *vpp = dvp; 23494bff34e3Sthurlow return (0); 23504bff34e3Sthurlow } 23514bff34e3Sthurlow 235291d632c8Sgwr /* 235391d632c8Sgwr * Special case for XATTR directory 235491d632c8Sgwr */ 235591d632c8Sgwr if (dvp->v_flag & V_XATTRDIR) { 235691d632c8Sgwr error = smbfs_xa_parent(dvp, vpp); 235791d632c8Sgwr return (error); 235891d632c8Sgwr } 235991d632c8Sgwr 23604bff34e3Sthurlow /* 23614bff34e3Sthurlow * Find the parent path length. 23624bff34e3Sthurlow */ 23634bff34e3Sthurlow rplen = dnp->n_rplen; 23644bff34e3Sthurlow ASSERT(rplen > 0); 23654bff34e3Sthurlow while (--rplen >= 0) { 23664bff34e3Sthurlow if (dnp->n_rpath[rplen] == '\\') 23674bff34e3Sthurlow break; 23684bff34e3Sthurlow } 236902d09e03SGordon Ross if (rplen <= 0) { 23704bff34e3Sthurlow /* Found our way to the root. */ 23714bff34e3Sthurlow vp = SMBTOV(smi->smi_root); 23724bff34e3Sthurlow VN_HOLD(vp); 23734bff34e3Sthurlow *vpp = vp; 23744bff34e3Sthurlow return (0); 23754bff34e3Sthurlow } 237602d09e03SGordon Ross np = smbfs_node_findcreate(smi, 237702d09e03SGordon Ross dnp->n_rpath, rplen, NULL, 0, 0, 237802d09e03SGordon Ross &smbfs_fattr0); /* force create */ 237902d09e03SGordon Ross ASSERT(np != NULL); 238002d09e03SGordon Ross vp = SMBTOV(np); 23814bff34e3Sthurlow vp->v_type = VDIR; 23824bff34e3Sthurlow 23834bff34e3Sthurlow /* Success! */ 23844bff34e3Sthurlow *vpp = vp; 23854bff34e3Sthurlow return (0); 23864bff34e3Sthurlow } 23874bff34e3Sthurlow 23884bff34e3Sthurlow /* 238902d09e03SGordon Ross * Normal lookup of a name under this directory. 239002d09e03SGordon Ross * Note we handled "", ".", ".." above. 
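 *
 * The order is: consult the smbfs node cache first (when the
 * caller allows it), and only go over the wire (smbfs_smb_lookup)
 * and construct a new node (smbfs_nget) on a cache miss.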
239102d09e03SGordon Ross */ 239202d09e03SGordon Ross if (cache_ok) { 239302d09e03SGordon Ross /* 239402d09e03SGordon Ross * The caller indicated that it's OK to use a 239502d09e03SGordon Ross * cached result for this lookup, so try to 239602d09e03SGordon Ross * reclaim a node from the smbfs node cache. 239702d09e03SGordon Ross */ 239802d09e03SGordon Ross error = smbfslookup_cache(dvp, nm, nmlen, &vp, cr); 239902d09e03SGordon Ross if (error) 240002d09e03SGordon Ross return (error); 240102d09e03SGordon Ross if (vp != NULL) { 240202d09e03SGordon Ross /* hold taken in lookup_cache */ 240302d09e03SGordon Ross *vpp = vp; 240402d09e03SGordon Ross return (0); 240502d09e03SGordon Ross } 240602d09e03SGordon Ross } 240702d09e03SGordon Ross 240802d09e03SGordon Ross /* 240902d09e03SGordon Ross * OK, go over-the-wire to get the attributes, 241002d09e03SGordon Ross * then create the node. 24114bff34e3Sthurlow */ 2412613a2f6bSGordon Ross smb_credinit(&scred, cr); 24134bff34e3Sthurlow /* Note: this can allocate a new "name" */ 24144bff34e3Sthurlow error = smbfs_smb_lookup(dnp, &name, &nmlen, &fa, &scred); 24154bff34e3Sthurlow smb_credrele(&scred); 241602d09e03SGordon Ross if (error == ENOTDIR) { 241702d09e03SGordon Ross /* 241802d09e03SGordon Ross * Lookup failed because this directory was 241902d09e03SGordon Ross * removed or renamed by another client. 242002d09e03SGordon Ross * Remove any cached attributes under it. 242102d09e03SGordon Ross */ 242202d09e03SGordon Ross smbfs_attrcache_remove(dnp); 242302d09e03SGordon Ross smbfs_attrcache_prune(dnp); 242402d09e03SGordon Ross } 24254bff34e3Sthurlow if (error) 24264bff34e3Sthurlow goto out; 24274bff34e3Sthurlow 24284bff34e3Sthurlow error = smbfs_nget(dvp, name, nmlen, &fa, &vp); 24294bff34e3Sthurlow if (error) 24304bff34e3Sthurlow goto out; 24314bff34e3Sthurlow 24324bff34e3Sthurlow /* Success! */ 24334bff34e3Sthurlow *vpp = vp; 24344bff34e3Sthurlow 24354bff34e3Sthurlow out: 24364bff34e3Sthurlow /* smbfs_smb_lookup may have allocated name. */ 24374bff34e3Sthurlow if (name != nm) 24384bff34e3Sthurlow smbfs_name_free(name, nmlen); 24394bff34e3Sthurlow 24404bff34e3Sthurlow return (error); 24414bff34e3Sthurlow } 24424bff34e3Sthurlow 244302d09e03SGordon Ross /* 244402d09e03SGordon Ross * smbfslookup_cache 244502d09e03SGordon Ross * 244602d09e03SGordon Ross * Try to reclaim a node from the smbfs node cache. 244702d09e03SGordon Ross * Some statistics for DEBUG. 244802d09e03SGordon Ross * 244902d09e03SGordon Ross * This mechanism lets us avoid many of the five (or more) 245002d09e03SGordon Ross * OtW lookup calls per file seen with "ls -l" if we search 245102d09e03SGordon Ross * the smbfs node cache for recently inactive(ated) nodes. 
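 *
 * The "cache" here is simply the set of smbnodes already hanging
 * off this mount: build the remote path for dvp + nm, look for an
 * existing node with that path, and use it only if its cached
 * attributes (r_attrtime) are still fresh.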
245202d09e03SGordon Ross */ 245391d632c8Sgwr #ifdef DEBUG 245402d09e03SGordon Ross int smbfs_lookup_cache_calls = 0; 245502d09e03SGordon Ross int smbfs_lookup_cache_error = 0; 245602d09e03SGordon Ross int smbfs_lookup_cache_miss = 0; 245702d09e03SGordon Ross int smbfs_lookup_cache_stale = 0; 245802d09e03SGordon Ross int smbfs_lookup_cache_hits = 0; 245902d09e03SGordon Ross #endif /* DEBUG */ 246091d632c8Sgwr 246191d632c8Sgwr /* ARGSUSED */ 246291d632c8Sgwr static int 246302d09e03SGordon Ross smbfslookup_cache(vnode_t *dvp, char *nm, int nmlen, 246402d09e03SGordon Ross vnode_t **vpp, cred_t *cr) 246591d632c8Sgwr { 246691d632c8Sgwr struct vattr va; 246791d632c8Sgwr smbnode_t *dnp; 246802d09e03SGordon Ross smbnode_t *np; 246902d09e03SGordon Ross vnode_t *vp; 247002d09e03SGordon Ross int error; 247102d09e03SGordon Ross char sep; 247291d632c8Sgwr 247391d632c8Sgwr dnp = VTOSMB(dvp); 247402d09e03SGordon Ross *vpp = NULL; 247591d632c8Sgwr 247602d09e03SGordon Ross #ifdef DEBUG 247702d09e03SGordon Ross smbfs_lookup_cache_calls++; 247802d09e03SGordon Ross #endif 247991d632c8Sgwr 248091d632c8Sgwr /* 248102d09e03SGordon Ross * First make sure we can get attributes for the 248202d09e03SGordon Ross * directory. Cached attributes are OK here. 248302d09e03SGordon Ross * If we removed or renamed the directory, this 248402d09e03SGordon Ross * will return ENOENT. If someone else removed 248502d09e03SGordon Ross * this directory or file, we'll find out when we 248602d09e03SGordon Ross * try to open or get attributes. 248791d632c8Sgwr */ 248802d09e03SGordon Ross va.va_mask = AT_TYPE | AT_MODE; 248902d09e03SGordon Ross error = smbfsgetattr(dvp, &va, cr); 249002d09e03SGordon Ross if (error) { 249191d632c8Sgwr #ifdef DEBUG 249202d09e03SGordon Ross smbfs_lookup_cache_error++; 249391d632c8Sgwr #endif 249402d09e03SGordon Ross return (error); 249502d09e03SGordon Ross } 249602d09e03SGordon Ross 249702d09e03SGordon Ross /* 249802d09e03SGordon Ross * Passing NULL smbfattr here so we will 249902d09e03SGordon Ross * just look, not create. 250002d09e03SGordon Ross */ 250102d09e03SGordon Ross sep = SMBFS_DNP_SEP(dnp); 250202d09e03SGordon Ross np = smbfs_node_findcreate(dnp->n_mount, 250302d09e03SGordon Ross dnp->n_rpath, dnp->n_rplen, 250402d09e03SGordon Ross nm, nmlen, sep, NULL); 250502d09e03SGordon Ross if (np == NULL) { 250691d632c8Sgwr #ifdef DEBUG 250702d09e03SGordon Ross smbfs_lookup_cache_miss++; 250891d632c8Sgwr #endif 250902d09e03SGordon Ross return (0); 251002d09e03SGordon Ross } 251102d09e03SGordon Ross 251202d09e03SGordon Ross /* 251302d09e03SGordon Ross * Found it. Attributes still valid? 251402d09e03SGordon Ross */ 251502d09e03SGordon Ross vp = SMBTOV(np); 251602d09e03SGordon Ross if (np->r_attrtime <= gethrtime()) { 251702d09e03SGordon Ross /* stale */ 251891d632c8Sgwr #ifdef DEBUG 251902d09e03SGordon Ross smbfs_lookup_cache_stale++; 252091d632c8Sgwr #endif 252102d09e03SGordon Ross VN_RELE(vp); 252202d09e03SGordon Ross return (0); 252391d632c8Sgwr } 252402d09e03SGordon Ross 252502d09e03SGordon Ross /* 252602d09e03SGordon Ross * Success! 
252702d09e03SGordon Ross * Caller gets hold from smbfs_node_findcreate 252802d09e03SGordon Ross */ 252991d632c8Sgwr #ifdef DEBUG 253002d09e03SGordon Ross smbfs_lookup_cache_hits++; 253191d632c8Sgwr #endif 253202d09e03SGordon Ross *vpp = vp; 253391d632c8Sgwr return (0); 253491d632c8Sgwr } 253591d632c8Sgwr 25365f4fc069Sjilinxpd 25374bff34e3Sthurlow /* 25384bff34e3Sthurlow * XXX 25394bff34e3Sthurlow * vsecattr_t is new to build 77, and we need to eventually support 25404bff34e3Sthurlow * it in order to create an ACL when an object is created. 25414bff34e3Sthurlow * 25424bff34e3Sthurlow * This op should support the new FIGNORECASE flag for case-insensitive 25434bff34e3Sthurlow * lookups, per PSARC 2007/244. 25444bff34e3Sthurlow */ 25454bff34e3Sthurlow /* ARGSUSED */ 25464bff34e3Sthurlow static int 25474bff34e3Sthurlow smbfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive, 25484bff34e3Sthurlow int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct, 25494bff34e3Sthurlow vsecattr_t *vsecp) 25504bff34e3Sthurlow { 25514bff34e3Sthurlow int error; 25524bff34e3Sthurlow int cerror; 25534bff34e3Sthurlow vfs_t *vfsp; 25544bff34e3Sthurlow vnode_t *vp; 25554bff34e3Sthurlow smbnode_t *np; 25564bff34e3Sthurlow smbnode_t *dnp; 25574bff34e3Sthurlow smbmntinfo_t *smi; 25584bff34e3Sthurlow struct vattr vattr; 25594bff34e3Sthurlow struct smbfattr fattr; 25604bff34e3Sthurlow struct smb_cred scred; 25614bff34e3Sthurlow const char *name = (const char *)nm; 25624bff34e3Sthurlow int nmlen = strlen(nm); 25634bff34e3Sthurlow uint32_t disp; 25644bff34e3Sthurlow uint16_t fid; 256591d632c8Sgwr int xattr; 25664bff34e3Sthurlow 25674bff34e3Sthurlow vfsp = dvp->v_vfsp; 25684bff34e3Sthurlow smi = VFTOSMI(vfsp); 25694bff34e3Sthurlow dnp = VTOSMB(dvp); 25704bff34e3Sthurlow vp = NULL; 25714bff34e3Sthurlow 2572a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 25734bff34e3Sthurlow return (EPERM); 25744bff34e3Sthurlow 25754bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED) 25764bff34e3Sthurlow return (EIO); 25774bff34e3Sthurlow 25784bff34e3Sthurlow /* 25794bff34e3Sthurlow * Note: this may break mknod(2) calls to create a directory, 25804bff34e3Sthurlow * but that's obscure use. Some other filesystems do this. 25815f4fc069Sjilinxpd * Todo: redirect VDIR type here to _mkdir. 25824bff34e3Sthurlow */ 25834bff34e3Sthurlow if (va->va_type != VREG) 25844bff34e3Sthurlow return (EINVAL); 25854bff34e3Sthurlow 25864bff34e3Sthurlow /* 25874bff34e3Sthurlow * If the pathname is "", just use dvp, no checks. 25884bff34e3Sthurlow * Do this outside of the rwlock (like zfs). 25894bff34e3Sthurlow */ 25904bff34e3Sthurlow if (nmlen == 0) { 25914bff34e3Sthurlow VN_HOLD(dvp); 25924bff34e3Sthurlow *vpp = dvp; 25934bff34e3Sthurlow return (0); 25944bff34e3Sthurlow } 25954bff34e3Sthurlow 25964bff34e3Sthurlow /* Don't allow "." or ".." through here. */ 25974bff34e3Sthurlow if ((nmlen == 1 && name[0] == '.') || 25984bff34e3Sthurlow (nmlen == 2 && name[0] == '.' && name[1] == '.')) 25994bff34e3Sthurlow return (EISDIR); 26004bff34e3Sthurlow 26014bff34e3Sthurlow /* 26024bff34e3Sthurlow * We make a copy of the attributes because the caller does not 26034bff34e3Sthurlow * expect us to change what va points to. 
26044bff34e3Sthurlow */ 26054bff34e3Sthurlow vattr = *va; 26064bff34e3Sthurlow 26074bff34e3Sthurlow if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp))) 26084bff34e3Sthurlow return (EINTR); 2609613a2f6bSGordon Ross smb_credinit(&scred, cr); 26104bff34e3Sthurlow 26114bff34e3Sthurlow /* 26124bff34e3Sthurlow * NFS needs to go over the wire, just to be sure whether the 261302d09e03SGordon Ross * file exists or not. Using a cached result is dangerous in 26144bff34e3Sthurlow * this case when making a decision regarding existence. 26154bff34e3Sthurlow * 26164bff34e3Sthurlow * The SMB protocol does NOT really need to go OTW here 26174bff34e3Sthurlow * thanks to the expressive NTCREATE disposition values. 26184bff34e3Sthurlow * Unfortunately, to do Unix access checks correctly, 26194bff34e3Sthurlow * we need to know if the object already exists. 26204bff34e3Sthurlow * When the object does not exist, we need VWRITE on 26214bff34e3Sthurlow * the directory. Note: smbfslookup() checks VEXEC. 26224bff34e3Sthurlow */ 26234bff34e3Sthurlow error = smbfslookup(dvp, nm, &vp, cr, 0, ct); 26244bff34e3Sthurlow if (error == 0) { 26254bff34e3Sthurlow /* 262642645588SGordon Ross * The file already exists. Error? 262742645588SGordon Ross * NB: have a hold from smbfslookup 26284bff34e3Sthurlow */ 26294bff34e3Sthurlow if (exclusive == EXCL) { 26304bff34e3Sthurlow error = EEXIST; 263142645588SGordon Ross VN_RELE(vp); 26324bff34e3Sthurlow goto out; 26334bff34e3Sthurlow } 26344bff34e3Sthurlow /* 26354bff34e3Sthurlow * Verify requested access. 26364bff34e3Sthurlow */ 26374bff34e3Sthurlow error = smbfs_access(vp, mode, 0, cr, ct); 263842645588SGordon Ross if (error) { 263942645588SGordon Ross VN_RELE(vp); 26404bff34e3Sthurlow goto out; 264142645588SGordon Ross } 26424bff34e3Sthurlow 26434bff34e3Sthurlow /* 26444bff34e3Sthurlow * Truncate (if requested). 26454bff34e3Sthurlow */ 26465f4fc069Sjilinxpd if ((vattr.va_mask & AT_SIZE) && vp->v_type == VREG) { 26475f4fc069Sjilinxpd np = VTOSMB(vp); 26485f4fc069Sjilinxpd /* 26495f4fc069Sjilinxpd * Check here for large file truncation by 26505f4fc069Sjilinxpd * LF-unaware process, like ufs_create(). 26515f4fc069Sjilinxpd */ 26525f4fc069Sjilinxpd if (!(lfaware & FOFFMAX)) { 26535f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 26545f4fc069Sjilinxpd if (np->r_size > MAXOFF32_T) 26555f4fc069Sjilinxpd error = EOVERFLOW; 26565f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 26575f4fc069Sjilinxpd } 26585f4fc069Sjilinxpd if (error) { 26595f4fc069Sjilinxpd VN_RELE(vp); 26605f4fc069Sjilinxpd goto out; 26615f4fc069Sjilinxpd } 26625f4fc069Sjilinxpd vattr.va_mask = AT_SIZE; 26635f4fc069Sjilinxpd error = smbfssetattr(vp, &vattr, 0, cr); 266442645588SGordon Ross if (error) { 266542645588SGordon Ross VN_RELE(vp); 26664bff34e3Sthurlow goto out; 266742645588SGordon Ross } 26685f4fc069Sjilinxpd #ifdef SMBFS_VNEVENT 26695f4fc069Sjilinxpd /* Existing file was truncated */ 26705f4fc069Sjilinxpd vnevent_create(vp, ct); 26715f4fc069Sjilinxpd #endif 26725f4fc069Sjilinxpd /* invalidate pages done in smbfssetattr() */ 26734bff34e3Sthurlow } 26744bff34e3Sthurlow /* Success! */ 26754bff34e3Sthurlow *vpp = vp; 26764bff34e3Sthurlow goto out; 26774bff34e3Sthurlow } 26784bff34e3Sthurlow 26794bff34e3Sthurlow /* 26804bff34e3Sthurlow * The file did not exist. Need VWRITE in the directory. 
26814bff34e3Sthurlow */ 26824bff34e3Sthurlow error = smbfs_access(dvp, VWRITE, 0, cr, ct); 26834bff34e3Sthurlow if (error) 26844bff34e3Sthurlow goto out; 26854bff34e3Sthurlow 26864bff34e3Sthurlow /* 26874bff34e3Sthurlow * Now things get tricky. We also need to check the 26884bff34e3Sthurlow * requested open mode against the file we may create. 26894bff34e3Sthurlow * See comments at smbfs_access_rwx 26904bff34e3Sthurlow */ 26914bff34e3Sthurlow error = smbfs_access_rwx(vfsp, VREG, mode, cr); 26924bff34e3Sthurlow if (error) 26934bff34e3Sthurlow goto out; 26944bff34e3Sthurlow 26954bff34e3Sthurlow /* 26964bff34e3Sthurlow * Now the code derived from Darwin, 26974bff34e3Sthurlow * but with greater use of NT_CREATE 26984bff34e3Sthurlow * disposition options. Much changed. 26994bff34e3Sthurlow * 27004bff34e3Sthurlow * Create (or open) a new child node. 27014bff34e3Sthurlow * Note we handled "." and ".." above. 27024bff34e3Sthurlow */ 27034bff34e3Sthurlow 27044bff34e3Sthurlow if (exclusive == EXCL) 27054bff34e3Sthurlow disp = NTCREATEX_DISP_CREATE; 27064bff34e3Sthurlow else { 27074bff34e3Sthurlow /* Truncate regular files if requested. */ 27084bff34e3Sthurlow if ((va->va_type == VREG) && 27094bff34e3Sthurlow (va->va_mask & AT_SIZE) && 27104bff34e3Sthurlow (va->va_size == 0)) 27114bff34e3Sthurlow disp = NTCREATEX_DISP_OVERWRITE_IF; 27124bff34e3Sthurlow else 27134bff34e3Sthurlow disp = NTCREATEX_DISP_OPEN_IF; 27144bff34e3Sthurlow } 271591d632c8Sgwr xattr = (dnp->n_flag & N_XATTR) ? 1 : 0; 271602d09e03SGordon Ross error = smbfs_smb_create(dnp, 271702d09e03SGordon Ross name, nmlen, xattr, 271802d09e03SGordon Ross disp, &scred, &fid); 27194bff34e3Sthurlow if (error) 27204bff34e3Sthurlow goto out; 27214bff34e3Sthurlow 27224bff34e3Sthurlow /* 27234bff34e3Sthurlow * Should use the fid to get/set the size 27244bff34e3Sthurlow * while we have it opened here. See above. 27254bff34e3Sthurlow */ 27264bff34e3Sthurlow 27274bff34e3Sthurlow cerror = smbfs_smb_close(smi->smi_share, fid, NULL, &scred); 27284bff34e3Sthurlow if (cerror) 272902d09e03SGordon Ross SMBVDEBUG("error %d closing %s\\%s\n", 27304bff34e3Sthurlow cerror, dnp->n_rpath, name); 27314bff34e3Sthurlow 27324bff34e3Sthurlow /* 27334bff34e3Sthurlow * In the open case, the name may differ a little 27344bff34e3Sthurlow * from what we passed to create (case, etc.) 27354bff34e3Sthurlow * so call lookup to get the (opened) name. 27364bff34e3Sthurlow * 27374bff34e3Sthurlow * XXX: Could avoid this extra lookup if the 27384bff34e3Sthurlow * "createact" result from NT_CREATE says we 27394bff34e3Sthurlow * created the object. 27404bff34e3Sthurlow */ 27414bff34e3Sthurlow error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred); 27424bff34e3Sthurlow if (error) 27434bff34e3Sthurlow goto out; 27444bff34e3Sthurlow 27454bff34e3Sthurlow /* update attr and directory cache */ 27464bff34e3Sthurlow smbfs_attr_touchdir(dnp); 27474bff34e3Sthurlow 27484bff34e3Sthurlow error = smbfs_nget(dvp, name, nmlen, &fattr, &vp); 27494bff34e3Sthurlow if (error) 27504bff34e3Sthurlow goto out; 27514bff34e3Sthurlow 27524bff34e3Sthurlow /* Success! 
*/ 27534bff34e3Sthurlow *vpp = vp; 27544bff34e3Sthurlow error = 0; 27554bff34e3Sthurlow 27564bff34e3Sthurlow out: 27574bff34e3Sthurlow smb_credrele(&scred); 275802d09e03SGordon Ross smbfs_rw_exit(&dnp->r_rwlock); 27594bff34e3Sthurlow if (name != nm) 27604bff34e3Sthurlow smbfs_name_free(name, nmlen); 27614bff34e3Sthurlow return (error); 27624bff34e3Sthurlow } 27634bff34e3Sthurlow 27644bff34e3Sthurlow /* 27654bff34e3Sthurlow * XXX 27664bff34e3Sthurlow * This op should support the new FIGNORECASE flag for case-insensitive 27674bff34e3Sthurlow * lookups, per PSARC 2007/244. 27684bff34e3Sthurlow */ 27694bff34e3Sthurlow /* ARGSUSED */ 27704bff34e3Sthurlow static int 27714bff34e3Sthurlow smbfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, 27724bff34e3Sthurlow int flags) 27734bff34e3Sthurlow { 27744bff34e3Sthurlow struct smb_cred scred; 2775ff1e230cSjilinxpd vnode_t *vp = NULL; 2776ff1e230cSjilinxpd smbnode_t *dnp = VTOSMB(dvp); 2777ff1e230cSjilinxpd smbmntinfo_t *smi = VTOSMI(dvp); 2778ff1e230cSjilinxpd int error; 27794bff34e3Sthurlow 2780a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 27814bff34e3Sthurlow return (EPERM); 27824bff34e3Sthurlow 27834bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 27844bff34e3Sthurlow return (EIO); 27854bff34e3Sthurlow 27864bff34e3Sthurlow /* 27874bff34e3Sthurlow * Verify access to the directory. 27884bff34e3Sthurlow */ 27894bff34e3Sthurlow error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct); 27904bff34e3Sthurlow if (error) 2791ff1e230cSjilinxpd return (error); 2792ff1e230cSjilinxpd 2793ff1e230cSjilinxpd if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp))) 2794ff1e230cSjilinxpd return (EINTR); 2795ff1e230cSjilinxpd smb_credinit(&scred, cr); 2796ff1e230cSjilinxpd 2797ff1e230cSjilinxpd /* Lookup the file to remove. */ 2798ff1e230cSjilinxpd error = smbfslookup(dvp, nm, &vp, cr, 0, ct); 2799ff1e230cSjilinxpd if (error == 0) { 2800ff1e230cSjilinxpd /* 2801ff1e230cSjilinxpd * Do the real remove work 2802ff1e230cSjilinxpd */ 2803ff1e230cSjilinxpd error = smbfsremove(dvp, vp, &scred, flags); 2804ff1e230cSjilinxpd VN_RELE(vp); 2805ff1e230cSjilinxpd } 2806ff1e230cSjilinxpd 2807ff1e230cSjilinxpd smb_credrele(&scred); 2808ff1e230cSjilinxpd smbfs_rw_exit(&dnp->r_rwlock); 2809ff1e230cSjilinxpd 2810ff1e230cSjilinxpd return (error); 2811ff1e230cSjilinxpd } 2812ff1e230cSjilinxpd 2813ff1e230cSjilinxpd /* 2814ff1e230cSjilinxpd * smbfsremove does the real work of removing in SMBFS 2815ff1e230cSjilinxpd * Caller has done dir access checks etc. 2816ff1e230cSjilinxpd * 2817ff1e230cSjilinxpd * The normal way to delete a file over SMB is open it (with DELETE access), 2818ff1e230cSjilinxpd * set the "delete-on-close" flag, and close the file. The problem for Unix 2819ff1e230cSjilinxpd * applications is that they expect the file name to be gone once the unlink 2820ff1e230cSjilinxpd * completes, and the SMB server does not actually delete the file until ALL 2821ff1e230cSjilinxpd * opens of that file are closed. We can't assume our open handles are the 2822ff1e230cSjilinxpd * only open handles on a file we're deleting, so to be safe we'll try to 2823ff1e230cSjilinxpd * rename the file to a temporary name and then set delete-on-close. If we 2824ff1e230cSjilinxpd * fail to set delete-on-close (i.e. because other opens prevent it) then 2825ff1e230cSjilinxpd * undo the changes we made and give up with EBUSY.
Note that we might have 2826ff1e230cSjilinxpd * permission to delete a file but lack permission to rename, so we want to 2827ff1e230cSjilinxpd * continue in cases where rename fails. As an optimization, only do the 2828ff1e230cSjilinxpd * rename when we have the file open. 2829ff1e230cSjilinxpd * 2830ff1e230cSjilinxpd * This is similar to what NFS does when deleting a file that has local opens, 2831ff1e230cSjilinxpd * but thanks to SMB delete-on-close, we don't need to keep track of when the 2832ff1e230cSjilinxpd * last local open goes away and send a delete. The server does that for us. 2833ff1e230cSjilinxpd */ 2834ff1e230cSjilinxpd /* ARGSUSED */ 2835ff1e230cSjilinxpd static int 2836ff1e230cSjilinxpd smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred, 2837ff1e230cSjilinxpd int flags) 2838ff1e230cSjilinxpd { 2839ff1e230cSjilinxpd smbnode_t *dnp = VTOSMB(dvp); 2840ff1e230cSjilinxpd smbnode_t *np = VTOSMB(vp); 2841ff1e230cSjilinxpd char *tmpname = NULL; 2842ff1e230cSjilinxpd int tnlen; 2843ff1e230cSjilinxpd int error; 2844ff1e230cSjilinxpd unsigned short fid; 2845ff1e230cSjilinxpd boolean_t have_fid = B_FALSE; 2846ff1e230cSjilinxpd boolean_t renamed = B_FALSE; 28474bff34e3Sthurlow 28484bff34e3Sthurlow /* 2849ff1e230cSjilinxpd * The dvp RWlock must be held as writer. 28504bff34e3Sthurlow */ 2851ff1e230cSjilinxpd ASSERT(dnp->r_rwlock.owner == curthread); 28524bff34e3Sthurlow 2853ff1e230cSjilinxpd /* Never allow link/unlink directories on SMB. */ 2854ff1e230cSjilinxpd if (vp->v_type == VDIR) 2855ff1e230cSjilinxpd return (EPERM); 2856ff1e230cSjilinxpd 28575f4fc069Sjilinxpd /* 28585f4fc069Sjilinxpd * We need to flush any dirty pages which happen to 28595f4fc069Sjilinxpd * be hanging around before removing the file. This 28605f4fc069Sjilinxpd * shouldn't happen very often and mostly on file 28615f4fc069Sjilinxpd * systems mounted "nocto". 28625f4fc069Sjilinxpd */ 28635f4fc069Sjilinxpd if (vn_has_cached_data(vp) && 28645f4fc069Sjilinxpd ((np->r_flags & RDIRTY) || np->r_count > 0)) { 28655f4fc069Sjilinxpd error = smbfs_putpage(vp, (offset_t)0, 0, 0, 28665f4fc069Sjilinxpd scred->scr_cred, NULL); 28675f4fc069Sjilinxpd if (error && (error == ENOSPC || error == EDQUOT)) { 28685f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 28695f4fc069Sjilinxpd if (!np->r_error) 28705f4fc069Sjilinxpd np->r_error = error; 28715f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 28725f4fc069Sjilinxpd } 28735f4fc069Sjilinxpd } 28745f4fc069Sjilinxpd 2875ff1e230cSjilinxpd /* Shared lock for n_fid use in smbfs_smb_setdisp etc. */ 2876ff1e230cSjilinxpd if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp))) 2877ff1e230cSjilinxpd return (EINTR); 2878ff1e230cSjilinxpd 2879ff1e230cSjilinxpd /* 2880ff1e230cSjilinxpd * Get a file handle with delete access. 2881ff1e230cSjilinxpd * Close this FID before return. 2882ff1e230cSjilinxpd */ 2883ff1e230cSjilinxpd error = smbfs_smb_tmpopen(np, STD_RIGHT_DELETE_ACCESS, 2884ff1e230cSjilinxpd scred, &fid); 2885ff1e230cSjilinxpd if (error) { 2886ff1e230cSjilinxpd SMBVDEBUG("error %d opening %s\n", 2887ff1e230cSjilinxpd error, np->n_rpath); 28884bff34e3Sthurlow goto out; 28894bff34e3Sthurlow } 2890ff1e230cSjilinxpd have_fid = B_TRUE; 28914bff34e3Sthurlow 28924bff34e3Sthurlow /* 2893ff1e230cSjilinxpd * If we have the file open, try to rename it to a temporary name. 2894ff1e230cSjilinxpd * If we can't rename, continue on and try setting DoC anyway. 
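 * (smbfs_newname() below generates the temporary name and
 * smbfs_smb_t2rename() sends the rename using the FID opened above.)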
28954bff34e3Sthurlow */ 289602d09e03SGordon Ross if ((vp->v_count > 1) && (np->n_fidrefs > 0)) { 2897ff1e230cSjilinxpd tmpname = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2898ff1e230cSjilinxpd tnlen = smbfs_newname(tmpname, MAXNAMELEN); 2899ff1e230cSjilinxpd error = smbfs_smb_t2rename(np, tmpname, tnlen, scred, fid, 0); 2900ff1e230cSjilinxpd if (error != 0) { 2901ff1e230cSjilinxpd SMBVDEBUG("error %d renaming %s -> %s\n", 29025f4fc069Sjilinxpd error, np->n_rpath, tmpname); 2903ff1e230cSjilinxpd /* Keep going without the rename. */ 2904ff1e230cSjilinxpd } else { 2905ff1e230cSjilinxpd renamed = B_TRUE; 2906ff1e230cSjilinxpd } 2907ff1e230cSjilinxpd } 29084bff34e3Sthurlow 2909ff1e230cSjilinxpd /* 2910ff1e230cSjilinxpd * Mark the file as delete-on-close. If we can't, 2911ff1e230cSjilinxpd * undo what we did and err out. 2912ff1e230cSjilinxpd */ 2913ff1e230cSjilinxpd error = smbfs_smb_setdisp(np, fid, 1, scred); 2914ff1e230cSjilinxpd if (error != 0) { 2915ff1e230cSjilinxpd SMBVDEBUG("error %d setting DoC on %s\n", 2916ff1e230cSjilinxpd error, np->n_rpath); 291702d09e03SGordon Ross /* 2918ff1e230cSjilinxpd * Failed to set DoC. If we renamed, undo that. 2919ff1e230cSjilinxpd * Need np->n_rpath relative to parent (dnp). 2920ff1e230cSjilinxpd * Use parent path name length plus one for 2921ff1e230cSjilinxpd * the separator ('/' or ':') 292202d09e03SGordon Ross */ 2923ff1e230cSjilinxpd if (renamed) { 2924ff1e230cSjilinxpd char *oldname; 2925ff1e230cSjilinxpd int oldnlen; 2926ff1e230cSjilinxpd int err2; 2927ff1e230cSjilinxpd 2928ff1e230cSjilinxpd oldname = np->n_rpath + (dnp->n_rplen + 1); 2929ff1e230cSjilinxpd oldnlen = np->n_rplen - (dnp->n_rplen + 1); 2930ff1e230cSjilinxpd err2 = smbfs_smb_t2rename(np, oldname, oldnlen, 2931ff1e230cSjilinxpd scred, fid, 0); 2932ff1e230cSjilinxpd SMBVDEBUG("error %d un-renaming %s -> %s\n", 29335f4fc069Sjilinxpd err2, tmpname, np->n_rpath); 293402d09e03SGordon Ross } 2935ff1e230cSjilinxpd error = EBUSY; 2936ff1e230cSjilinxpd goto out; 29374bff34e3Sthurlow } 2938ff1e230cSjilinxpd /* Done! */ 2939ff1e230cSjilinxpd smbfs_attrcache_prune(np); 29404bff34e3Sthurlow 29415f4fc069Sjilinxpd #ifdef SMBFS_VNEVENT 29425f4fc069Sjilinxpd vnevent_remove(vp, dvp, nm, ct); 29435f4fc069Sjilinxpd #endif 29445f4fc069Sjilinxpd 29454bff34e3Sthurlow out: 2946ff1e230cSjilinxpd if (tmpname != NULL) 2947ff1e230cSjilinxpd kmem_free(tmpname, MAXNAMELEN); 2948ff1e230cSjilinxpd 2949ff1e230cSjilinxpd if (have_fid) 2950ff1e230cSjilinxpd (void) smbfs_smb_tmpclose(np, fid, scred); 2951ff1e230cSjilinxpd smbfs_rw_exit(&np->r_lkserlock); 2952ff1e230cSjilinxpd 2953ff1e230cSjilinxpd if (error == 0) { 2954ff1e230cSjilinxpd /* Keep lookup from finding this node anymore. */ 2955ff1e230cSjilinxpd smbfs_rmhash(np); 2956ff1e230cSjilinxpd } 29574bff34e3Sthurlow 29584bff34e3Sthurlow return (error); 29594bff34e3Sthurlow } 29604bff34e3Sthurlow 29614bff34e3Sthurlow 29625f4fc069Sjilinxpd /* ARGSUSED */ 29635f4fc069Sjilinxpd static int 29645f4fc069Sjilinxpd smbfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr, 29655f4fc069Sjilinxpd caller_context_t *ct, int flags) 29665f4fc069Sjilinxpd { 29675f4fc069Sjilinxpd /* Not yet... */ 29685f4fc069Sjilinxpd return (ENOSYS); 29695f4fc069Sjilinxpd } 29705f4fc069Sjilinxpd 29715f4fc069Sjilinxpd 29724bff34e3Sthurlow /* 29734bff34e3Sthurlow * XXX 29744bff34e3Sthurlow * This op should support the new FIGNORECASE flag for case-insensitive 29754bff34e3Sthurlow * lookups, per PSARC 2007/244. 
29764bff34e3Sthurlow */ 29774bff34e3Sthurlow /* ARGSUSED */ 29784bff34e3Sthurlow static int 29794bff34e3Sthurlow smbfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr, 29804bff34e3Sthurlow caller_context_t *ct, int flags) 29814bff34e3Sthurlow { 2982ff1e230cSjilinxpd struct smb_cred scred; 2983ff1e230cSjilinxpd smbnode_t *odnp = VTOSMB(odvp); 2984ff1e230cSjilinxpd smbnode_t *ndnp = VTOSMB(ndvp); 2985ff1e230cSjilinxpd vnode_t *ovp; 2986ff1e230cSjilinxpd int error; 29874bff34e3Sthurlow 2988a19609f8Sjv if (curproc->p_zone != VTOSMI(odvp)->smi_zone_ref.zref_zone || 2989a19609f8Sjv curproc->p_zone != VTOSMI(ndvp)->smi_zone_ref.zref_zone) 29904bff34e3Sthurlow return (EPERM); 29914bff34e3Sthurlow 29924bff34e3Sthurlow if (VTOSMI(odvp)->smi_flags & SMI_DEAD || 29934bff34e3Sthurlow VTOSMI(ndvp)->smi_flags & SMI_DEAD || 29944bff34e3Sthurlow odvp->v_vfsp->vfs_flag & VFS_UNMOUNTED || 29954bff34e3Sthurlow ndvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 29964bff34e3Sthurlow return (EIO); 29974bff34e3Sthurlow 29984bff34e3Sthurlow if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 || 29994bff34e3Sthurlow strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0) 30004bff34e3Sthurlow return (EINVAL); 30014bff34e3Sthurlow 30024bff34e3Sthurlow /* 30034bff34e3Sthurlow * Check that everything is on the same filesystem. 30044bff34e3Sthurlow * vn_rename checks the fsid's, but in case we don't 30054bff34e3Sthurlow * fill those in correctly, check here too. 30064bff34e3Sthurlow */ 30074bff34e3Sthurlow if (odvp->v_vfsp != ndvp->v_vfsp) 30084bff34e3Sthurlow return (EXDEV); 30094bff34e3Sthurlow 3010ff1e230cSjilinxpd /* 3011ff1e230cSjilinxpd * Need write access on source and target. 3012ff1e230cSjilinxpd * Server takes care of most checks. 3013ff1e230cSjilinxpd */ 3014ff1e230cSjilinxpd error = smbfs_access(odvp, VWRITE|VEXEC, 0, cr, ct); 3015ff1e230cSjilinxpd if (error) 3016ff1e230cSjilinxpd return (error); 3017ff1e230cSjilinxpd if (odvp != ndvp) { 3018ff1e230cSjilinxpd error = smbfs_access(ndvp, VWRITE, 0, cr, ct); 3019ff1e230cSjilinxpd if (error) 3020ff1e230cSjilinxpd return (error); 3021ff1e230cSjilinxpd } 30224bff34e3Sthurlow 30234bff34e3Sthurlow /* 3024ff1e230cSjilinxpd * Need to lock both old/new dirs as writer. 3025ff1e230cSjilinxpd * 30264bff34e3Sthurlow * Avoid deadlock here on old vs new directory nodes 30274bff34e3Sthurlow * by always taking the locks in order of address. 30284bff34e3Sthurlow * The order is arbitrary, but must be consistent. 
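 * For example, when odnp < ndnp we take odnp->r_rwlock before
 * ndnp->r_rwlock; otherwise the two locks are taken in the
 * opposite order (see the code just below).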
30294bff34e3Sthurlow */ 30304bff34e3Sthurlow if (odnp < ndnp) { 30314bff34e3Sthurlow if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER, 30324bff34e3Sthurlow SMBINTR(odvp))) 30334bff34e3Sthurlow return (EINTR); 30344bff34e3Sthurlow if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER, 30354bff34e3Sthurlow SMBINTR(ndvp))) { 30364bff34e3Sthurlow smbfs_rw_exit(&odnp->r_rwlock); 30374bff34e3Sthurlow return (EINTR); 30384bff34e3Sthurlow } 30394bff34e3Sthurlow } else { 30404bff34e3Sthurlow if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER, 30414bff34e3Sthurlow SMBINTR(ndvp))) 30424bff34e3Sthurlow return (EINTR); 30434bff34e3Sthurlow if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER, 30444bff34e3Sthurlow SMBINTR(odvp))) { 30454bff34e3Sthurlow smbfs_rw_exit(&ndnp->r_rwlock); 30464bff34e3Sthurlow return (EINTR); 30474bff34e3Sthurlow } 30484bff34e3Sthurlow } 304902d09e03SGordon Ross smb_credinit(&scred, cr); 30504bff34e3Sthurlow 3051ff1e230cSjilinxpd /* Lookup the "old" name */ 3052ff1e230cSjilinxpd error = smbfslookup(odvp, onm, &ovp, cr, 0, ct); 3053ff1e230cSjilinxpd if (error == 0) { 3054ff1e230cSjilinxpd /* 3055ff1e230cSjilinxpd * Do the real rename work 3056ff1e230cSjilinxpd */ 3057ff1e230cSjilinxpd error = smbfsrename(odvp, ovp, ndvp, nnm, &scred, flags); 3058ff1e230cSjilinxpd VN_RELE(ovp); 30594bff34e3Sthurlow } 30604bff34e3Sthurlow 3061ff1e230cSjilinxpd smb_credrele(&scred); 3062ff1e230cSjilinxpd smbfs_rw_exit(&odnp->r_rwlock); 3063ff1e230cSjilinxpd smbfs_rw_exit(&ndnp->r_rwlock); 3064ff1e230cSjilinxpd 3065ff1e230cSjilinxpd return (error); 3066ff1e230cSjilinxpd } 3067ff1e230cSjilinxpd 3068ff1e230cSjilinxpd /* 3069ff1e230cSjilinxpd * smbfsrename does the real work of renaming in SMBFS 3070ff1e230cSjilinxpd * Caller has done dir access checks etc. 3071ff1e230cSjilinxpd */ 3072ff1e230cSjilinxpd /* ARGSUSED */ 3073ff1e230cSjilinxpd static int 3074ff1e230cSjilinxpd smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp, char *nnm, 3075ff1e230cSjilinxpd struct smb_cred *scred, int flags) 3076ff1e230cSjilinxpd { 3077ff1e230cSjilinxpd smbnode_t *odnp = VTOSMB(odvp); 3078ff1e230cSjilinxpd smbnode_t *onp = VTOSMB(ovp); 3079ff1e230cSjilinxpd smbnode_t *ndnp = VTOSMB(ndvp); 3080ff1e230cSjilinxpd vnode_t *nvp = NULL; 3081ff1e230cSjilinxpd int error; 3082ff1e230cSjilinxpd int nvp_locked = 0; 3083ff1e230cSjilinxpd 3084ff1e230cSjilinxpd /* Things our caller should have checked. */ 3085ff1e230cSjilinxpd ASSERT(curproc->p_zone == VTOSMI(odvp)->smi_zone_ref.zref_zone); 3086ff1e230cSjilinxpd ASSERT(odvp->v_vfsp == ndvp->v_vfsp); 3087ff1e230cSjilinxpd ASSERT(odnp->r_rwlock.owner == curthread); 3088ff1e230cSjilinxpd ASSERT(ndnp->r_rwlock.owner == curthread); 30894bff34e3Sthurlow 30904bff34e3Sthurlow /* 30914bff34e3Sthurlow * Lookup the target file. If it exists, it needs to be 30924bff34e3Sthurlow * checked to see whether it is a mount point and whether 30934bff34e3Sthurlow * it is active (open). 30944bff34e3Sthurlow */ 3095ff1e230cSjilinxpd error = smbfslookup(ndvp, nnm, &nvp, scred->scr_cred, 0, NULL); 30964bff34e3Sthurlow if (!error) { 30974bff34e3Sthurlow /* 30984bff34e3Sthurlow * Target (nvp) already exists. Check that it 30994bff34e3Sthurlow * has the same type as the source. The server 31004bff34e3Sthurlow * will check this also, (and more reliably) but 31014bff34e3Sthurlow * this lets us return the correct error codes. 
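 * (ENOTDIR when a directory would replace a non-directory, and
 * EISDIR when a non-directory would replace a directory, as
 * checked just below.)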
31024bff34e3Sthurlow */ 31034bff34e3Sthurlow if (ovp->v_type == VDIR) { 31044bff34e3Sthurlow if (nvp->v_type != VDIR) { 31054bff34e3Sthurlow error = ENOTDIR; 31064bff34e3Sthurlow goto out; 31074bff34e3Sthurlow } 31084bff34e3Sthurlow } else { 31094bff34e3Sthurlow if (nvp->v_type == VDIR) { 31104bff34e3Sthurlow error = EISDIR; 31114bff34e3Sthurlow goto out; 31124bff34e3Sthurlow } 31134bff34e3Sthurlow } 31144bff34e3Sthurlow 31154bff34e3Sthurlow /* 31164bff34e3Sthurlow * POSIX dictates that when the source and target 31174bff34e3Sthurlow * entries refer to the same file object, rename 31184bff34e3Sthurlow * must do nothing and exit without error. 31194bff34e3Sthurlow */ 31204bff34e3Sthurlow if (ovp == nvp) { 31214bff34e3Sthurlow error = 0; 31224bff34e3Sthurlow goto out; 31234bff34e3Sthurlow } 31244bff34e3Sthurlow 31254bff34e3Sthurlow /* 31264bff34e3Sthurlow * Also must ensure the target is not a mount point, 31274bff34e3Sthurlow * and keep mount/umount away until we're done. 31284bff34e3Sthurlow */ 31294bff34e3Sthurlow if (vn_vfsrlock(nvp)) { 31304bff34e3Sthurlow error = EBUSY; 31314bff34e3Sthurlow goto out; 31324bff34e3Sthurlow } 31334bff34e3Sthurlow nvp_locked = 1; 31344bff34e3Sthurlow if (vn_mountedvfs(nvp) != NULL) { 31354bff34e3Sthurlow error = EBUSY; 31364bff34e3Sthurlow goto out; 31374bff34e3Sthurlow } 31384bff34e3Sthurlow 313991d632c8Sgwr /* 3140ff1e230cSjilinxpd * CIFS may give a SHARING_VIOLATION error when 314191d632c8Sgwr * trying to rename onto an existing object, 314291d632c8Sgwr * so try to remove the target first. 314391d632c8Sgwr * (Only for files, not directories.) 314491d632c8Sgwr */ 314591d632c8Sgwr if (nvp->v_type == VDIR) { 314691d632c8Sgwr error = EEXIST; 314791d632c8Sgwr goto out; 314891d632c8Sgwr } 3149ff1e230cSjilinxpd error = smbfsremove(ndvp, nvp, scred, flags); 3150ff1e230cSjilinxpd if (error != 0) 31514bff34e3Sthurlow goto out; 315202d09e03SGordon Ross 315391d632c8Sgwr /* 315491d632c8Sgwr * OK, removed the target file. Continue as if 315591d632c8Sgwr * lookup target had failed (nvp == NULL). 315691d632c8Sgwr */ 315791d632c8Sgwr vn_vfsunlock(nvp); 315891d632c8Sgwr nvp_locked = 0; 315991d632c8Sgwr VN_RELE(nvp); 316091d632c8Sgwr nvp = NULL; 31614bff34e3Sthurlow } /* nvp */ 31624bff34e3Sthurlow 316302d09e03SGordon Ross smbfs_attrcache_remove(onp); 3164ff1e230cSjilinxpd error = smbfs_smb_rename(onp, ndnp, nnm, strlen(nnm), scred); 31654bff34e3Sthurlow 316602d09e03SGordon Ross /* 316702d09e03SGordon Ross * If the old name should no longer exist, 316802d09e03SGordon Ross * discard any cached attributes under it. 316902d09e03SGordon Ross */ 31705f4fc069Sjilinxpd if (error == 0) { 317102d09e03SGordon Ross smbfs_attrcache_prune(onp); 31725f4fc069Sjilinxpd /* SMBFS_VNEVENT... */ 31735f4fc069Sjilinxpd } 31744bff34e3Sthurlow 31754bff34e3Sthurlow out: 31764bff34e3Sthurlow if (nvp) { 31774bff34e3Sthurlow if (nvp_locked) 31784bff34e3Sthurlow vn_vfsunlock(nvp); 31794bff34e3Sthurlow VN_RELE(nvp); 31804bff34e3Sthurlow } 31814bff34e3Sthurlow 31824bff34e3Sthurlow return (error); 31834bff34e3Sthurlow } 31844bff34e3Sthurlow 31854bff34e3Sthurlow /* 31864bff34e3Sthurlow * XXX 31874bff34e3Sthurlow * vsecattr_t is new to build 77, and we need to eventually support 31884bff34e3Sthurlow * it in order to create an ACL when an object is created. 31894bff34e3Sthurlow * 31904bff34e3Sthurlow * This op should support the new FIGNORECASE flag for case-insensitive 31914bff34e3Sthurlow * lookups, per PSARC 2007/244.
31924bff34e3Sthurlow */ 31934bff34e3Sthurlow /* ARGSUSED */ 31944bff34e3Sthurlow static int 31954bff34e3Sthurlow smbfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp, 31964bff34e3Sthurlow cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp) 31974bff34e3Sthurlow { 31984bff34e3Sthurlow vnode_t *vp; 31994bff34e3Sthurlow struct smbnode *dnp = VTOSMB(dvp); 32004bff34e3Sthurlow struct smbmntinfo *smi = VTOSMI(dvp); 32014bff34e3Sthurlow struct smb_cred scred; 32024bff34e3Sthurlow struct smbfattr fattr; 32034bff34e3Sthurlow const char *name = (const char *) nm; 32044bff34e3Sthurlow int nmlen = strlen(name); 32054bff34e3Sthurlow int error, hiderr; 32064bff34e3Sthurlow 3207a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 32084bff34e3Sthurlow return (EPERM); 32094bff34e3Sthurlow 32104bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 32114bff34e3Sthurlow return (EIO); 32124bff34e3Sthurlow 32134bff34e3Sthurlow if ((nmlen == 1 && name[0] == '.') || 32144bff34e3Sthurlow (nmlen == 2 && name[0] == '.' && name[1] == '.')) 32154bff34e3Sthurlow return (EEXIST); 32164bff34e3Sthurlow 321791d632c8Sgwr /* Only plain files are allowed in V_XATTRDIR. */ 321891d632c8Sgwr if (dvp->v_flag & V_XATTRDIR) 321991d632c8Sgwr return (EINVAL); 322091d632c8Sgwr 32214bff34e3Sthurlow if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp))) 32224bff34e3Sthurlow return (EINTR); 3223613a2f6bSGordon Ross smb_credinit(&scred, cr); 32244bff34e3Sthurlow 32254bff34e3Sthurlow /* 32264bff34e3Sthurlow * Require write access in the containing directory. 32274bff34e3Sthurlow */ 32284bff34e3Sthurlow error = smbfs_access(dvp, VWRITE, 0, cr, ct); 32294bff34e3Sthurlow if (error) 32304bff34e3Sthurlow goto out; 32314bff34e3Sthurlow 32324bff34e3Sthurlow error = smbfs_smb_mkdir(dnp, name, nmlen, &scred); 32334bff34e3Sthurlow if (error) 32344bff34e3Sthurlow goto out; 32354bff34e3Sthurlow 32364bff34e3Sthurlow error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred); 32374bff34e3Sthurlow if (error) 32384bff34e3Sthurlow goto out; 32394bff34e3Sthurlow 32404bff34e3Sthurlow smbfs_attr_touchdir(dnp); 32414bff34e3Sthurlow 32424bff34e3Sthurlow error = smbfs_nget(dvp, name, nmlen, &fattr, &vp); 32434bff34e3Sthurlow if (error) 32444bff34e3Sthurlow goto out; 32454bff34e3Sthurlow 32464bff34e3Sthurlow if (name[0] == '.') 32474bff34e3Sthurlow if ((hiderr = smbfs_smb_hideit(VTOSMB(vp), NULL, 0, &scred))) 32484bff34e3Sthurlow SMBVDEBUG("hide failure %d\n", hiderr); 32494bff34e3Sthurlow 32504bff34e3Sthurlow /* Success! */ 32514bff34e3Sthurlow *vpp = vp; 32524bff34e3Sthurlow error = 0; 32534bff34e3Sthurlow out: 32544bff34e3Sthurlow smb_credrele(&scred); 32554bff34e3Sthurlow smbfs_rw_exit(&dnp->r_rwlock); 32564bff34e3Sthurlow 32574bff34e3Sthurlow if (name != nm) 32584bff34e3Sthurlow smbfs_name_free(name, nmlen); 32594bff34e3Sthurlow 32604bff34e3Sthurlow return (error); 32614bff34e3Sthurlow } 32624bff34e3Sthurlow 32634bff34e3Sthurlow /* 32644bff34e3Sthurlow * XXX 32654bff34e3Sthurlow * This op should support the new FIGNORECASE flag for case-insensitive 32664bff34e3Sthurlow * lookups, per PSARC 2007/244. 
32674bff34e3Sthurlow */ 32684bff34e3Sthurlow /* ARGSUSED */ 32694bff34e3Sthurlow static int 32704bff34e3Sthurlow smbfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr, 32714bff34e3Sthurlow caller_context_t *ct, int flags) 32724bff34e3Sthurlow { 32734bff34e3Sthurlow vnode_t *vp = NULL; 32744bff34e3Sthurlow int vp_locked = 0; 32754bff34e3Sthurlow struct smbmntinfo *smi = VTOSMI(dvp); 32764bff34e3Sthurlow struct smbnode *dnp = VTOSMB(dvp); 32774bff34e3Sthurlow struct smbnode *np; 32784bff34e3Sthurlow struct smb_cred scred; 32794bff34e3Sthurlow int error; 32804bff34e3Sthurlow 3281a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 32824bff34e3Sthurlow return (EPERM); 32834bff34e3Sthurlow 32844bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 32854bff34e3Sthurlow return (EIO); 32864bff34e3Sthurlow 32874bff34e3Sthurlow if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp))) 32884bff34e3Sthurlow return (EINTR); 3289613a2f6bSGordon Ross smb_credinit(&scred, cr); 32904bff34e3Sthurlow 32914bff34e3Sthurlow /* 32924bff34e3Sthurlow * Require w/x access in the containing directory. 32934bff34e3Sthurlow * Server handles all other access checks. 32944bff34e3Sthurlow */ 32954bff34e3Sthurlow error = smbfs_access(dvp, VEXEC|VWRITE, 0, cr, ct); 32964bff34e3Sthurlow if (error) 32974bff34e3Sthurlow goto out; 32984bff34e3Sthurlow 32994bff34e3Sthurlow /* 33004bff34e3Sthurlow * First lookup the entry to be removed. 33014bff34e3Sthurlow */ 33024bff34e3Sthurlow error = smbfslookup(dvp, nm, &vp, cr, 0, ct); 33034bff34e3Sthurlow if (error) 33044bff34e3Sthurlow goto out; 33054bff34e3Sthurlow np = VTOSMB(vp); 33064bff34e3Sthurlow 33074bff34e3Sthurlow /* 33084bff34e3Sthurlow * Disallow rmdir of "." or current dir, or the FS root. 33094bff34e3Sthurlow * Also make sure it's a directory, not a mount point, 33104bff34e3Sthurlow * and lock to keep mount/umount away until we're done. 
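 * (vn_vfsrlock() below provides the mount/umount exclusion; it is
 * dropped again at the out: label.)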
33114bff34e3Sthurlow */ 33124bff34e3Sthurlow if ((vp == dvp) || (vp == cdir) || (vp->v_flag & VROOT)) { 33134bff34e3Sthurlow error = EINVAL; 33144bff34e3Sthurlow goto out; 33154bff34e3Sthurlow } 33164bff34e3Sthurlow if (vp->v_type != VDIR) { 33174bff34e3Sthurlow error = ENOTDIR; 33184bff34e3Sthurlow goto out; 33194bff34e3Sthurlow } 33204bff34e3Sthurlow if (vn_vfsrlock(vp)) { 33214bff34e3Sthurlow error = EBUSY; 33224bff34e3Sthurlow goto out; 33234bff34e3Sthurlow } 33244bff34e3Sthurlow vp_locked = 1; 33254bff34e3Sthurlow if (vn_mountedvfs(vp) != NULL) { 33264bff34e3Sthurlow error = EBUSY; 33274bff34e3Sthurlow goto out; 33284bff34e3Sthurlow } 33294bff34e3Sthurlow 333002d09e03SGordon Ross smbfs_attrcache_remove(np); 333102d09e03SGordon Ross error = smbfs_smb_rmdir(np, &scred); 333291d632c8Sgwr 333391d632c8Sgwr /* 333402d09e03SGordon Ross * Similar to smbfs_remove 333591d632c8Sgwr */ 333602d09e03SGordon Ross switch (error) { 333702d09e03SGordon Ross case 0: 333802d09e03SGordon Ross case ENOENT: 333902d09e03SGordon Ross case ENOTDIR: 334002d09e03SGordon Ross smbfs_attrcache_prune(np); 334102d09e03SGordon Ross break; 334291d632c8Sgwr } 334391d632c8Sgwr 33444bff34e3Sthurlow if (error) 33454bff34e3Sthurlow goto out; 33464bff34e3Sthurlow 33474bff34e3Sthurlow mutex_enter(&np->r_statelock); 33484bff34e3Sthurlow dnp->n_flag |= NMODIFIED; 33494bff34e3Sthurlow mutex_exit(&np->r_statelock); 33504bff34e3Sthurlow smbfs_attr_touchdir(dnp); 335102d09e03SGordon Ross smbfs_rmhash(np); 33524bff34e3Sthurlow 33534bff34e3Sthurlow out: 33544bff34e3Sthurlow if (vp) { 33554bff34e3Sthurlow if (vp_locked) 33564bff34e3Sthurlow vn_vfsunlock(vp); 33574bff34e3Sthurlow VN_RELE(vp); 33584bff34e3Sthurlow } 33594bff34e3Sthurlow smb_credrele(&scred); 33604bff34e3Sthurlow smbfs_rw_exit(&dnp->r_rwlock); 33614bff34e3Sthurlow 33624bff34e3Sthurlow return (error); 33634bff34e3Sthurlow } 33644bff34e3Sthurlow 33654bff34e3Sthurlow 33665f4fc069Sjilinxpd /* ARGSUSED */ 33675f4fc069Sjilinxpd static int 33685f4fc069Sjilinxpd smbfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr, 33695f4fc069Sjilinxpd caller_context_t *ct, int flags) 33705f4fc069Sjilinxpd { 33715f4fc069Sjilinxpd /* Not yet... */ 33725f4fc069Sjilinxpd return (ENOSYS); 33735f4fc069Sjilinxpd } 33745f4fc069Sjilinxpd 33755f4fc069Sjilinxpd 33764bff34e3Sthurlow /* ARGSUSED */ 33774bff34e3Sthurlow static int 33784bff34e3Sthurlow smbfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp, 33794bff34e3Sthurlow caller_context_t *ct, int flags) 33804bff34e3Sthurlow { 33814bff34e3Sthurlow struct smbnode *np = VTOSMB(vp); 33824bff34e3Sthurlow int error = 0; 33834bff34e3Sthurlow smbmntinfo_t *smi; 33844bff34e3Sthurlow 33854bff34e3Sthurlow smi = VTOSMI(vp); 33864bff34e3Sthurlow 3387a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 33884bff34e3Sthurlow return (EIO); 33894bff34e3Sthurlow 33904bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 33914bff34e3Sthurlow return (EIO); 33924bff34e3Sthurlow 33934bff34e3Sthurlow /* 33944bff34e3Sthurlow * Require read access in the directory. 
33954bff34e3Sthurlow */ 33964bff34e3Sthurlow error = smbfs_access(vp, VREAD, 0, cr, ct); 33974bff34e3Sthurlow if (error) 33984bff34e3Sthurlow return (error); 33994bff34e3Sthurlow 34004bff34e3Sthurlow ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER)); 34014bff34e3Sthurlow 34024bff34e3Sthurlow /* 34035f4fc069Sjilinxpd * Todo readdir cache here 34044bff34e3Sthurlow * 34054bff34e3Sthurlow * I am serializing the entire readdir operation 34064bff34e3Sthurlow * now since we have not yet implemented readdir 34074bff34e3Sthurlow * cache. This fix needs to be revisited once 34084bff34e3Sthurlow * we implement readdir cache. 34094bff34e3Sthurlow */ 34104bff34e3Sthurlow if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp))) 34114bff34e3Sthurlow return (EINTR); 34124bff34e3Sthurlow 34134bff34e3Sthurlow error = smbfs_readvdir(vp, uiop, cr, eofp, ct); 34144bff34e3Sthurlow 34154bff34e3Sthurlow smbfs_rw_exit(&np->r_lkserlock); 34164bff34e3Sthurlow 34174bff34e3Sthurlow return (error); 34184bff34e3Sthurlow } 34194bff34e3Sthurlow 34204bff34e3Sthurlow /* ARGSUSED */ 34214bff34e3Sthurlow static int 34224bff34e3Sthurlow smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, 34234bff34e3Sthurlow caller_context_t *ct) 34244bff34e3Sthurlow { 34255ecede33SGordon Ross /* 34265ecede33SGordon Ross * Note: "limit" tells the SMB-level FindFirst/FindNext 34275ecede33SGordon Ross * functions how many directory entries to request in 34285ecede33SGordon Ross * each OtW call. It needs to be large enough so that 34295ecede33SGordon Ross * we don't make lots of tiny OtW requests, but there's 34305ecede33SGordon Ross * no point making it larger than the maximum number of 34315ecede33SGordon Ross * OtW entries that would fit in a maximum sized trans2 34325ecede33SGordon Ross * response (64k / 48). Beyond that, it's just tuning. 34335ecede33SGordon Ross * WinNT used 512, Win2k used 1366. We use 1000. 34345ecede33SGordon Ross */ 34355ecede33SGordon Ross static const int limit = 1000; 34365ecede33SGordon Ross /* Largest possible dirent size. */ 34375ecede33SGordon Ross static const size_t dbufsiz = DIRENT64_RECLEN(SMB_MAXFNAMELEN); 34384bff34e3Sthurlow struct smb_cred scred; 34394bff34e3Sthurlow vnode_t *newvp; 34404bff34e3Sthurlow struct smbnode *np = VTOSMB(vp); 34414bff34e3Sthurlow struct smbfs_fctx *ctx; 34425ecede33SGordon Ross struct dirent64 *dp; 34435ecede33SGordon Ross ssize_t save_resid; 34445ecede33SGordon Ross offset_t save_offset; /* 64 bits */ 34455ecede33SGordon Ross int offset; /* yes, 32 bits */ 34465ecede33SGordon Ross int nmlen, error; 34475ecede33SGordon Ross ushort_t reclen; 34484bff34e3Sthurlow 3449a19609f8Sjv ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone); 34504bff34e3Sthurlow 34514bff34e3Sthurlow /* Make sure we serialize for n_dirseq use. */ 34524bff34e3Sthurlow ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER)); 34534bff34e3Sthurlow 34545ecede33SGordon Ross /* 34555ecede33SGordon Ross * Make sure smbfs_open filled in n_dirseq 34565ecede33SGordon Ross */ 34575ecede33SGordon Ross if (np->n_dirseq == NULL) 34585ecede33SGordon Ross return (EBADF); 34595ecede33SGordon Ross 34605ecede33SGordon Ross /* Check for overflow of (32-bit) directory offset. */ 34615ecede33SGordon Ross if (uio->uio_loffset < 0 || uio->uio_loffset > INT32_MAX || 34625ecede33SGordon Ross (uio->uio_loffset + uio->uio_resid) > INT32_MAX) 34635ecede33SGordon Ross return (EINVAL); 34645ecede33SGordon Ross 34655ecede33SGordon Ross /* Require space for at least one dirent.
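 * That is, uio_resid must be at least dbufsiz,
 * DIRENT64_RECLEN(SMB_MAXFNAMELEN), the largest possible entry.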
*/ 34665ecede33SGordon Ross if (uio->uio_resid < dbufsiz) 34674bff34e3Sthurlow return (EINVAL); 34684bff34e3Sthurlow 34694bff34e3Sthurlow SMBVDEBUG("dirname='%s'\n", np->n_rpath); 3470613a2f6bSGordon Ross smb_credinit(&scred, cr); 34714bff34e3Sthurlow dp = kmem_alloc(dbufsiz, KM_SLEEP); 34724bff34e3Sthurlow 34735ecede33SGordon Ross save_resid = uio->uio_resid; 34745ecede33SGordon Ross save_offset = uio->uio_loffset; 34755ecede33SGordon Ross offset = uio->uio_offset; 34765ecede33SGordon Ross SMBVDEBUG("in: offset=%d, resid=%d\n", 34775ecede33SGordon Ross (int)uio->uio_offset, (int)uio->uio_resid); 34785ecede33SGordon Ross error = 0; 34794bff34e3Sthurlow 34804bff34e3Sthurlow /* 34814bff34e3Sthurlow * Generate the "." and ".." entries here so we can 34824bff34e3Sthurlow * (1) make sure they appear (but only once), and 34834bff34e3Sthurlow * (2) deal with getting their I numbers which the 34844bff34e3Sthurlow * findnext below does only for normal names. 34854bff34e3Sthurlow */ 34865ecede33SGordon Ross while (offset < FIRST_DIROFS) { 34875ecede33SGordon Ross /* 34885ecede33SGordon Ross * Tricky bit filling in the first two: 34895ecede33SGordon Ross * offset 0 is ".", offset 1 is ".." 34905ecede33SGordon Ross * so strlen of these is offset+1. 34915ecede33SGordon Ross */ 34924bff34e3Sthurlow reclen = DIRENT64_RECLEN(offset + 1); 34935ecede33SGordon Ross if (uio->uio_resid < reclen) 34945ecede33SGordon Ross goto out; 34954bff34e3Sthurlow bzero(dp, reclen); 34964bff34e3Sthurlow dp->d_reclen = reclen; 34974bff34e3Sthurlow dp->d_name[0] = '.'; 34984bff34e3Sthurlow dp->d_name[1] = '.'; 34994bff34e3Sthurlow dp->d_name[offset + 1] = '\0'; 35004bff34e3Sthurlow /* 35014bff34e3Sthurlow * Want the real I-numbers for the "." and ".." 35024bff34e3Sthurlow * entries. For these two names, we know that 35035ecede33SGordon Ross * smbfslookup can get the nodes efficiently. 35044bff34e3Sthurlow */ 35054bff34e3Sthurlow error = smbfslookup(vp, dp->d_name, &newvp, cr, 1, ct); 35064bff34e3Sthurlow if (error) { 35074bff34e3Sthurlow dp->d_ino = np->n_ino + offset; /* fiction */ 35084bff34e3Sthurlow } else { 35094bff34e3Sthurlow dp->d_ino = VTOSMB(newvp)->n_ino; 35104bff34e3Sthurlow VN_RELE(newvp); 35114bff34e3Sthurlow } 35125ecede33SGordon Ross /* 35135ecede33SGordon Ross * Note: d_off is the offset that a user-level program 35145ecede33SGordon Ross * should seek to for reading the NEXT directory entry. 35155ecede33SGordon Ross * See libc: readdir, telldir, seekdir 35165ecede33SGordon Ross */ 35175ecede33SGordon Ross dp->d_off = offset + 1; 35185ecede33SGordon Ross error = uiomove(dp, reclen, UIO_READ, uio); 35194bff34e3Sthurlow if (error) 35204bff34e3Sthurlow goto out; 35215ecede33SGordon Ross /* 35225ecede33SGordon Ross * Note: uiomove updates uio->uio_offset, 35235ecede33SGordon Ross * but we want it to be our "cookie" value, 35245ecede33SGordon Ross * which just counts dirents ignoring size. 35255ecede33SGordon Ross */ 35264bff34e3Sthurlow uio->uio_offset = ++offset; 35274bff34e3Sthurlow } 35285ecede33SGordon Ross 35295ecede33SGordon Ross /* 35305ecede33SGordon Ross * If there was a backward seek, we have to reopen. 
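 * (n_dirofs only moves forward as smbfs_smb_findnext consumes
 * entries, so an offset behind n_dirofs means we must do a new
 * findopen to rewind the search.)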
35315ecede33SGordon Ross */ 35325ecede33SGordon Ross if (offset < np->n_dirofs) { 35335ecede33SGordon Ross SMBVDEBUG("Reopening search %d:%d\n", 35345ecede33SGordon Ross offset, np->n_dirofs); 35354bff34e3Sthurlow error = smbfs_smb_findopen(np, "*", 1, 35364bff34e3Sthurlow SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR, 35374bff34e3Sthurlow &scred, &ctx); 35384bff34e3Sthurlow if (error) { 35394bff34e3Sthurlow SMBVDEBUG("can not open search, error = %d", error); 35404bff34e3Sthurlow goto out; 35414bff34e3Sthurlow } 35425ecede33SGordon Ross /* free the old one */ 35435ecede33SGordon Ross (void) smbfs_smb_findclose(np->n_dirseq, &scred); 35445ecede33SGordon Ross /* save the new one */ 35454bff34e3Sthurlow np->n_dirseq = ctx; 35465ecede33SGordon Ross np->n_dirofs = FIRST_DIROFS; 35475ecede33SGordon Ross } else { 35484bff34e3Sthurlow ctx = np->n_dirseq; 35495ecede33SGordon Ross } 35505ecede33SGordon Ross 35515ecede33SGordon Ross /* 35525ecede33SGordon Ross * Skip entries before the requested offset. 35535ecede33SGordon Ross */ 35544bff34e3Sthurlow while (np->n_dirofs < offset) { 35555ecede33SGordon Ross error = smbfs_smb_findnext(ctx, limit, &scred); 35565ecede33SGordon Ross if (error != 0) 35574bff34e3Sthurlow goto out; 35585ecede33SGordon Ross np->n_dirofs++; 35594bff34e3Sthurlow } 35605ecede33SGordon Ross 35615ecede33SGordon Ross /* 35625ecede33SGordon Ross * While there's room in the caller's buffer: 35635ecede33SGordon Ross * get a directory entry from SMB, 35645ecede33SGordon Ross * convert to a dirent, copyout. 35655ecede33SGordon Ross * We stop when there is no longer room for a 35665ecede33SGordon Ross * maximum sized dirent because we must decide 35675ecede33SGordon Ross * before we know anything about the next entry. 35685ecede33SGordon Ross */ 35695ecede33SGordon Ross while (uio->uio_resid >= dbufsiz) { 35704bff34e3Sthurlow error = smbfs_smb_findnext(ctx, limit, &scred); 35715ecede33SGordon Ross if (error != 0) 35725ecede33SGordon Ross goto out; 35734bff34e3Sthurlow np->n_dirofs++; 35745ecede33SGordon Ross 35754bff34e3Sthurlow /* Sanity check the name length. */ 35764bff34e3Sthurlow nmlen = ctx->f_nmlen; 3577613a2f6bSGordon Ross if (nmlen > SMB_MAXFNAMELEN) { 3578613a2f6bSGordon Ross nmlen = SMB_MAXFNAMELEN; 35794bff34e3Sthurlow SMBVDEBUG("Truncating name: %s\n", ctx->f_name); 35804bff34e3Sthurlow } 35814bff34e3Sthurlow if (smbfs_fastlookup) { 358202d09e03SGordon Ross /* See comment at smbfs_fastlookup above. */ 35835ecede33SGordon Ross if (smbfs_nget(vp, ctx->f_name, nmlen, 35845ecede33SGordon Ross &ctx->f_attr, &newvp) == 0) 35854bff34e3Sthurlow VN_RELE(newvp); 35864bff34e3Sthurlow } 35875ecede33SGordon Ross 35885ecede33SGordon Ross reclen = DIRENT64_RECLEN(nmlen); 35895ecede33SGordon Ross bzero(dp, reclen); 35905ecede33SGordon Ross dp->d_reclen = reclen; 35915ecede33SGordon Ross bcopy(ctx->f_name, dp->d_name, nmlen); 35925ecede33SGordon Ross dp->d_name[nmlen] = '\0'; 359302d09e03SGordon Ross dp->d_ino = ctx->f_inum; 35945ecede33SGordon Ross dp->d_off = offset + 1; /* See d_off comment above */ 35955ecede33SGordon Ross error = uiomove(dp, reclen, UIO_READ, uio); 35964bff34e3Sthurlow if (error) 35975ecede33SGordon Ross goto out; 35985ecede33SGordon Ross /* See comment re. uio_offset above. 
*/ 35994bff34e3Sthurlow uio->uio_offset = ++offset; 36004bff34e3Sthurlow } 36015ecede33SGordon Ross 36024bff34e3Sthurlow out: 36035ecede33SGordon Ross /* 36045ecede33SGordon Ross * When we come to the end of a directory, the 36055ecede33SGordon Ross * SMB-level functions return ENOENT, but the 36065ecede33SGordon Ross * caller is not expecting an error return. 36075ecede33SGordon Ross * 36085ecede33SGordon Ross * Also note that we must delay the call to 36095ecede33SGordon Ross * smbfs_smb_findclose(np->n_dirseq, ...) 36105ecede33SGordon Ross * until smbfs_close so that all reads at the 36115ecede33SGordon Ross * end of the directory will return no data. 36125ecede33SGordon Ross */ 36135ecede33SGordon Ross if (error == ENOENT) { 36145ecede33SGordon Ross error = 0; 36155ecede33SGordon Ross if (eofp) 36165ecede33SGordon Ross *eofp = 1; 36175ecede33SGordon Ross } 36185ecede33SGordon Ross /* 36195ecede33SGordon Ross * If we encountered an error (i.e. "access denied") 36205ecede33SGordon Ross * from the FindFirst call, we will have copied out 36215ecede33SGordon Ross * the "." and ".." entries leaving offset == 2. 36225ecede33SGordon Ross * In that case, restore the original offset/resid 36235ecede33SGordon Ross * so the caller gets no data with the error. 36245ecede33SGordon Ross */ 36255ecede33SGordon Ross if (error != 0 && offset == FIRST_DIROFS) { 36265ecede33SGordon Ross uio->uio_loffset = save_offset; 36275ecede33SGordon Ross uio->uio_resid = save_resid; 36285ecede33SGordon Ross } 36295ecede33SGordon Ross SMBVDEBUG("out: offset=%d, resid=%d\n", 36305ecede33SGordon Ross (int)uio->uio_offset, (int)uio->uio_resid); 36315ecede33SGordon Ross 36324bff34e3Sthurlow kmem_free(dp, dbufsiz); 36334bff34e3Sthurlow smb_credrele(&scred); 36344bff34e3Sthurlow return (error); 36354bff34e3Sthurlow } 36364bff34e3Sthurlow 36375f4fc069Sjilinxpd /* 36385f4fc069Sjilinxpd * Here NFS has: nfs3_bio 36395f4fc069Sjilinxpd * See smbfs_bio above. 36405f4fc069Sjilinxpd */ 36415f4fc069Sjilinxpd 36425f4fc069Sjilinxpd /* ARGSUSED */ 36435f4fc069Sjilinxpd static int 36445f4fc069Sjilinxpd smbfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct) 36455f4fc069Sjilinxpd { 36465f4fc069Sjilinxpd return (ENOSYS); 36475f4fc069Sjilinxpd } 36485f4fc069Sjilinxpd 36494bff34e3Sthurlow 36504bff34e3Sthurlow /* 36514bff34e3Sthurlow * The pair of functions VOP_RWLOCK, VOP_RWUNLOCK 36524bff34e3Sthurlow * are optional functions that are called by: 36534bff34e3Sthurlow * getdents, before/after VOP_READDIR 36544bff34e3Sthurlow * pread, before/after ... VOP_READ 36554bff34e3Sthurlow * pwrite, before/after ... VOP_WRITE 36564bff34e3Sthurlow * (other places) 36574bff34e3Sthurlow * 36584bff34e3Sthurlow * Careful here: None of the above check for any 36594bff34e3Sthurlow * error returns from VOP_RWLOCK / VOP_RWUNLOCK! 36604bff34e3Sthurlow * In fact, the return value from _rwlock is NOT 36614bff34e3Sthurlow * an error code, but V_WRITELOCK_TRUE / _FALSE. 36624bff34e3Sthurlow * 36634bff34e3Sthurlow * Therefore, it's up to _this_ code to make sure 36644bff34e3Sthurlow * the lock state remains balanced, which means 36654bff34e3Sthurlow * we can't "bail out" on interrupts, etc. 
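 * That is why the calls below pass FALSE rather than an SMBINTR()
 * condition to smbfs_rw_enter_sig and ignore its return value.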
36664bff34e3Sthurlow */ 36674bff34e3Sthurlow 36684bff34e3Sthurlow /* ARGSUSED2 */ 36694bff34e3Sthurlow static int 36704bff34e3Sthurlow smbfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp) 36714bff34e3Sthurlow { 36724bff34e3Sthurlow smbnode_t *np = VTOSMB(vp); 36734bff34e3Sthurlow 36744bff34e3Sthurlow if (!write_lock) { 36754bff34e3Sthurlow (void) smbfs_rw_enter_sig(&np->r_rwlock, RW_READER, FALSE); 36764bff34e3Sthurlow return (V_WRITELOCK_FALSE); 36774bff34e3Sthurlow } 36784bff34e3Sthurlow 36794bff34e3Sthurlow 36804bff34e3Sthurlow (void) smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, FALSE); 36814bff34e3Sthurlow return (V_WRITELOCK_TRUE); 36824bff34e3Sthurlow } 36834bff34e3Sthurlow 36844bff34e3Sthurlow /* ARGSUSED */ 36854bff34e3Sthurlow static void 36864bff34e3Sthurlow smbfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp) 36874bff34e3Sthurlow { 36884bff34e3Sthurlow smbnode_t *np = VTOSMB(vp); 36894bff34e3Sthurlow 36904bff34e3Sthurlow smbfs_rw_exit(&np->r_rwlock); 36914bff34e3Sthurlow } 36924bff34e3Sthurlow 36934bff34e3Sthurlow 36944bff34e3Sthurlow /* ARGSUSED */ 36954bff34e3Sthurlow static int 36964bff34e3Sthurlow smbfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct) 36974bff34e3Sthurlow { 36984bff34e3Sthurlow smbmntinfo_t *smi; 36994bff34e3Sthurlow 37004bff34e3Sthurlow smi = VTOSMI(vp); 37014bff34e3Sthurlow 3702a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 37034bff34e3Sthurlow return (EPERM); 37044bff34e3Sthurlow 37054bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 37064bff34e3Sthurlow return (EIO); 37074bff34e3Sthurlow 37084bff34e3Sthurlow /* 37094bff34e3Sthurlow * Because we stuff the readdir cookie into the offset field 37104bff34e3Sthurlow * someone may attempt to do an lseek with the cookie which 37114bff34e3Sthurlow * we want to succeed. 37124bff34e3Sthurlow */ 37134bff34e3Sthurlow if (vp->v_type == VDIR) 37144bff34e3Sthurlow return (0); 37154bff34e3Sthurlow 37164bff34e3Sthurlow /* Like NFS3, just check for 63-bit overflow. 
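 * (A negative *noffp means the 63-bit offset wrapped, so we return
 * EINVAL; any other non-negative value is accepted here.)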
*/ 37174bff34e3Sthurlow if (*noffp < 0) 37184bff34e3Sthurlow return (EINVAL); 37194bff34e3Sthurlow 37204bff34e3Sthurlow return (0); 37214bff34e3Sthurlow } 37224bff34e3Sthurlow 37235f4fc069Sjilinxpd /* mmap support ******************************************************** */ 37245f4fc069Sjilinxpd 3725*8329232eSGordon Ross #ifdef _KERNEL 3726*8329232eSGordon Ross 37275f4fc069Sjilinxpd #ifdef DEBUG 37285f4fc069Sjilinxpd static int smbfs_lostpage = 0; /* number of times we lost original page */ 37295f4fc069Sjilinxpd #endif 37305f4fc069Sjilinxpd 37315f4fc069Sjilinxpd /* 37325f4fc069Sjilinxpd * Return all the pages from [off..off+len) in file 37335f4fc069Sjilinxpd * Like nfs3_getpage 37345f4fc069Sjilinxpd */ 37355f4fc069Sjilinxpd /* ARGSUSED */ 37365f4fc069Sjilinxpd static int 37375f4fc069Sjilinxpd smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp, 37385f4fc069Sjilinxpd page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, 37395f4fc069Sjilinxpd enum seg_rw rw, cred_t *cr, caller_context_t *ct) 37405f4fc069Sjilinxpd { 37415f4fc069Sjilinxpd smbnode_t *np; 37425f4fc069Sjilinxpd smbmntinfo_t *smi; 37435f4fc069Sjilinxpd int error; 37445f4fc069Sjilinxpd 37455f4fc069Sjilinxpd np = VTOSMB(vp); 37465f4fc069Sjilinxpd smi = VTOSMI(vp); 37475f4fc069Sjilinxpd 37485f4fc069Sjilinxpd if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 37495f4fc069Sjilinxpd return (EIO); 37505f4fc069Sjilinxpd 37515f4fc069Sjilinxpd if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 37525f4fc069Sjilinxpd return (EIO); 37535f4fc069Sjilinxpd 37545f4fc069Sjilinxpd if (vp->v_flag & VNOMAP) 37555f4fc069Sjilinxpd return (ENOSYS); 37565f4fc069Sjilinxpd 37575f4fc069Sjilinxpd if (protp != NULL) 37585f4fc069Sjilinxpd *protp = PROT_ALL; 37595f4fc069Sjilinxpd 37605f4fc069Sjilinxpd /* 37615f4fc069Sjilinxpd * Now validate that the caches are up to date. 37625f4fc069Sjilinxpd */ 37635f4fc069Sjilinxpd error = smbfs_validate_caches(vp, cr); 37645f4fc069Sjilinxpd if (error) 37655f4fc069Sjilinxpd return (error); 37665f4fc069Sjilinxpd 37675f4fc069Sjilinxpd retry: 37685f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 37695f4fc069Sjilinxpd 37705f4fc069Sjilinxpd /* 37715f4fc069Sjilinxpd * Don't create dirty pages faster than they 37725f4fc069Sjilinxpd * can be cleaned ... (etc. see nfs) 37735f4fc069Sjilinxpd * 37745f4fc069Sjilinxpd * Here NFS also tests: 37755f4fc069Sjilinxpd * (mi->mi_max_threads != 0 && 37765f4fc069Sjilinxpd * rp->r_awcount > 2 * mi->mi_max_threads) 37775f4fc069Sjilinxpd */ 37785f4fc069Sjilinxpd if (rw == S_CREATE) { 37795f4fc069Sjilinxpd while (np->r_gcount > 0) 37805f4fc069Sjilinxpd cv_wait(&np->r_cv, &np->r_statelock); 37815f4fc069Sjilinxpd } 37825f4fc069Sjilinxpd 37835f4fc069Sjilinxpd /* 37845f4fc069Sjilinxpd * If we are getting called as a side effect of a write 37855f4fc069Sjilinxpd * operation the local file size might not be extended yet. 37865f4fc069Sjilinxpd * In this case we want to be able to return pages of zeroes.
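 * (Hence the check below: a request ending past r_size + PAGEOFFSET
 * fails with EFAULT unless the segment is segkmap.)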
37875f4fc069Sjilinxpd */ 37885f4fc069Sjilinxpd if (off + len > np->r_size + PAGEOFFSET && seg != segkmap) { 37895f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 37905f4fc069Sjilinxpd return (EFAULT); /* beyond EOF */ 37915f4fc069Sjilinxpd } 37925f4fc069Sjilinxpd 37935f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 37945f4fc069Sjilinxpd 37955f4fc069Sjilinxpd error = pvn_getpages(smbfs_getapage, vp, off, len, protp, 37965f4fc069Sjilinxpd pl, plsz, seg, addr, rw, cr); 37975f4fc069Sjilinxpd 37985f4fc069Sjilinxpd switch (error) { 37995f4fc069Sjilinxpd case SMBFS_EOF: 38005f4fc069Sjilinxpd smbfs_purge_caches(vp, cr); 38015f4fc069Sjilinxpd goto retry; 38025f4fc069Sjilinxpd case ESTALE: 38035f4fc069Sjilinxpd /* 38045f4fc069Sjilinxpd * Here NFS has: PURGE_STALE_FH(error, vp, cr); 38055f4fc069Sjilinxpd * In-line here as we only use it once. 38065f4fc069Sjilinxpd */ 38075f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 38085f4fc069Sjilinxpd np->r_flags |= RSTALE; 38095f4fc069Sjilinxpd if (!np->r_error) 38105f4fc069Sjilinxpd np->r_error = (error); 38115f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 38125f4fc069Sjilinxpd if (vn_has_cached_data(vp)) 38135f4fc069Sjilinxpd smbfs_invalidate_pages(vp, (u_offset_t)0, cr); 38145f4fc069Sjilinxpd smbfs_purge_caches(vp, cr); 38155f4fc069Sjilinxpd break; 38165f4fc069Sjilinxpd default: 38175f4fc069Sjilinxpd break; 38185f4fc069Sjilinxpd } 38195f4fc069Sjilinxpd 38205f4fc069Sjilinxpd return (error); 38215f4fc069Sjilinxpd } 38225f4fc069Sjilinxpd 38235f4fc069Sjilinxpd /* 38245f4fc069Sjilinxpd * Called from pvn_getpages to get a particular page. 38255f4fc069Sjilinxpd * Like nfs3_getapage 38265f4fc069Sjilinxpd */ 38275f4fc069Sjilinxpd /* ARGSUSED */ 38285f4fc069Sjilinxpd static int 38295f4fc069Sjilinxpd smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp, 38305f4fc069Sjilinxpd page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, 38315f4fc069Sjilinxpd enum seg_rw rw, cred_t *cr) 38325f4fc069Sjilinxpd { 38335f4fc069Sjilinxpd smbnode_t *np; 38345f4fc069Sjilinxpd smbmntinfo_t *smi; 38355f4fc069Sjilinxpd 38365f4fc069Sjilinxpd uint_t bsize; 38375f4fc069Sjilinxpd struct buf *bp; 38385f4fc069Sjilinxpd page_t *pp; 38395f4fc069Sjilinxpd u_offset_t lbn; 38405f4fc069Sjilinxpd u_offset_t io_off; 38415f4fc069Sjilinxpd u_offset_t blkoff; 38425f4fc069Sjilinxpd size_t io_len; 38435f4fc069Sjilinxpd uint_t blksize; 38445f4fc069Sjilinxpd int error; 38455f4fc069Sjilinxpd /* int readahead; */ 38465f4fc069Sjilinxpd int readahead_issued = 0; 38475f4fc069Sjilinxpd /* int ra_window; * readahead window */ 38485f4fc069Sjilinxpd page_t *pagefound; 38495f4fc069Sjilinxpd 38505f4fc069Sjilinxpd np = VTOSMB(vp); 38515f4fc069Sjilinxpd smi = VTOSMI(vp); 38525f4fc069Sjilinxpd 38535f4fc069Sjilinxpd if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 38545f4fc069Sjilinxpd return (EIO); 38555f4fc069Sjilinxpd 38565f4fc069Sjilinxpd if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 38575f4fc069Sjilinxpd return (EIO); 38585f4fc069Sjilinxpd 38595f4fc069Sjilinxpd bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE); 38605f4fc069Sjilinxpd 38615f4fc069Sjilinxpd reread: 38625f4fc069Sjilinxpd bp = NULL; 38635f4fc069Sjilinxpd pp = NULL; 38645f4fc069Sjilinxpd pagefound = NULL; 38655f4fc069Sjilinxpd 38665f4fc069Sjilinxpd if (pl != NULL) 38675f4fc069Sjilinxpd pl[0] = NULL; 38685f4fc069Sjilinxpd 38695f4fc069Sjilinxpd error = 0; 38705f4fc069Sjilinxpd lbn = off / bsize; 38715f4fc069Sjilinxpd blkoff = lbn * bsize; 38725f4fc069Sjilinxpd 38735f4fc069Sjilinxpd /* 38745f4fc069Sjilinxpd * NFS queues 
up readahead work here. 38755f4fc069Sjilinxpd */ 38765f4fc069Sjilinxpd 38775f4fc069Sjilinxpd again: 38785f4fc069Sjilinxpd if ((pagefound = page_exists(vp, off)) == NULL) { 38795f4fc069Sjilinxpd if (pl == NULL) { 38805f4fc069Sjilinxpd (void) 0; /* Todo: smbfs_async_readahead(); */ 38815f4fc069Sjilinxpd } else if (rw == S_CREATE) { 38825f4fc069Sjilinxpd /* 38835f4fc069Sjilinxpd * Block for this page is not allocated, or the offset 38845f4fc069Sjilinxpd * is beyond the current allocation size, or we're 38855f4fc069Sjilinxpd * allocating a swap slot and the page was not found, 38865f4fc069Sjilinxpd * so allocate it and return a zero page. 38875f4fc069Sjilinxpd */ 38885f4fc069Sjilinxpd if ((pp = page_create_va(vp, off, 38895f4fc069Sjilinxpd PAGESIZE, PG_WAIT, seg, addr)) == NULL) 38905f4fc069Sjilinxpd cmn_err(CE_PANIC, "smbfs_getapage: page_create"); 38915f4fc069Sjilinxpd io_len = PAGESIZE; 38925f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 38935f4fc069Sjilinxpd np->r_nextr = off + PAGESIZE; 38945f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 38955f4fc069Sjilinxpd } else { 38965f4fc069Sjilinxpd /* 38975f4fc069Sjilinxpd * Need to go to server to get a BLOCK, exception to 38985f4fc069Sjilinxpd * that being while reading at offset = 0 or doing 38995f4fc069Sjilinxpd * random i/o, in that case read only a PAGE. 39005f4fc069Sjilinxpd */ 39015f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 39025f4fc069Sjilinxpd if (blkoff < np->r_size && 39035f4fc069Sjilinxpd blkoff + bsize >= np->r_size) { 39045f4fc069Sjilinxpd /* 39055f4fc069Sjilinxpd * If only a block or less is left in 39065f4fc069Sjilinxpd * the file, read all that is remaining. 39075f4fc069Sjilinxpd */ 39085f4fc069Sjilinxpd if (np->r_size <= off) { 39095f4fc069Sjilinxpd /* 39105f4fc069Sjilinxpd * Trying to access beyond EOF, 39115f4fc069Sjilinxpd * set up to get at least one page. 39125f4fc069Sjilinxpd */ 39135f4fc069Sjilinxpd blksize = off + PAGESIZE - blkoff; 39145f4fc069Sjilinxpd } else 39155f4fc069Sjilinxpd blksize = np->r_size - blkoff; 39165f4fc069Sjilinxpd } else if ((off == 0) || 39175f4fc069Sjilinxpd (off != np->r_nextr && !readahead_issued)) { 39185f4fc069Sjilinxpd blksize = PAGESIZE; 39195f4fc069Sjilinxpd blkoff = off; /* block = page here */ 39205f4fc069Sjilinxpd } else 39215f4fc069Sjilinxpd blksize = bsize; 39225f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 39235f4fc069Sjilinxpd 39245f4fc069Sjilinxpd pp = pvn_read_kluster(vp, off, seg, addr, &io_off, 39255f4fc069Sjilinxpd &io_len, blkoff, blksize, 0); 39265f4fc069Sjilinxpd 39275f4fc069Sjilinxpd /* 39285f4fc069Sjilinxpd * Some other thread has entered the page, 39295f4fc069Sjilinxpd * so just use it. 39305f4fc069Sjilinxpd */ 39315f4fc069Sjilinxpd if (pp == NULL) 39325f4fc069Sjilinxpd goto again; 39335f4fc069Sjilinxpd 39345f4fc069Sjilinxpd /* 39355f4fc069Sjilinxpd * Now round the request size up to page boundaries. 39365f4fc069Sjilinxpd * This ensures that the entire page will be 39375f4fc069Sjilinxpd * initialized to zeroes if EOF is encountered. 39385f4fc069Sjilinxpd */ 39395f4fc069Sjilinxpd io_len = ptob(btopr(io_len)); 39405f4fc069Sjilinxpd 39415f4fc069Sjilinxpd bp = pageio_setup(pp, io_len, vp, B_READ); 39425f4fc069Sjilinxpd ASSERT(bp != NULL); 39435f4fc069Sjilinxpd 39445f4fc069Sjilinxpd /* 39455f4fc069Sjilinxpd * pageio_setup should have set b_addr to 0. This 39465f4fc069Sjilinxpd * is correct since we want to do I/O on a page 39475f4fc069Sjilinxpd * boundary. 
bp_mapin will use this addr to calculate 39485f4fc069Sjilinxpd * an offset, and then set b_addr to the kernel virtual 39495f4fc069Sjilinxpd * address it allocated for us. 39505f4fc069Sjilinxpd */ 39515f4fc069Sjilinxpd ASSERT(bp->b_un.b_addr == 0); 39525f4fc069Sjilinxpd 39535f4fc069Sjilinxpd bp->b_edev = 0; 39545f4fc069Sjilinxpd bp->b_dev = 0; 39555f4fc069Sjilinxpd bp->b_lblkno = lbtodb(io_off); 39565f4fc069Sjilinxpd bp->b_file = vp; 39575f4fc069Sjilinxpd bp->b_offset = (offset_t)off; 39585f4fc069Sjilinxpd bp_mapin(bp); 39595f4fc069Sjilinxpd 39605f4fc069Sjilinxpd /* 39615f4fc069Sjilinxpd * If doing a write beyond what we believe is EOF, 39625f4fc069Sjilinxpd * don't bother trying to read the pages from the 39635f4fc069Sjilinxpd * server, we'll just zero the pages here. We 39645f4fc069Sjilinxpd * don't check that the rw flag is S_WRITE here 39655f4fc069Sjilinxpd * because some implementations may attempt a 39665f4fc069Sjilinxpd * read access to the buffer before copying data. 39675f4fc069Sjilinxpd */ 39685f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 39695f4fc069Sjilinxpd if (io_off >= np->r_size && seg == segkmap) { 39705f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 39715f4fc069Sjilinxpd bzero(bp->b_un.b_addr, io_len); 39725f4fc069Sjilinxpd } else { 39735f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 39745f4fc069Sjilinxpd error = smbfs_bio(bp, 0, cr); 39755f4fc069Sjilinxpd } 39765f4fc069Sjilinxpd 39775f4fc069Sjilinxpd /* 39785f4fc069Sjilinxpd * Unmap the buffer before freeing it. 39795f4fc069Sjilinxpd */ 39805f4fc069Sjilinxpd bp_mapout(bp); 39815f4fc069Sjilinxpd pageio_done(bp); 39825f4fc069Sjilinxpd 39835f4fc069Sjilinxpd /* Here NFS3 updates all pp->p_fsdata */ 39845f4fc069Sjilinxpd 39855f4fc069Sjilinxpd if (error == SMBFS_EOF) { 39865f4fc069Sjilinxpd /* 39875f4fc069Sjilinxpd * If doing a write system call just return 39885f4fc069Sjilinxpd * zeroed pages, else user tried to get pages 39895f4fc069Sjilinxpd * beyond EOF, return error. We don't check 39905f4fc069Sjilinxpd * that the rw flag is S_WRITE here because 39915f4fc069Sjilinxpd * some implementations may attempt a read 39925f4fc069Sjilinxpd * access to the buffer before copying data. 39935f4fc069Sjilinxpd */ 39945f4fc069Sjilinxpd if (seg == segkmap) 39955f4fc069Sjilinxpd error = 0; 39965f4fc069Sjilinxpd else 39975f4fc069Sjilinxpd error = EFAULT; 39985f4fc069Sjilinxpd } 39995f4fc069Sjilinxpd 40005f4fc069Sjilinxpd if (!readahead_issued && !error) { 40015f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 40025f4fc069Sjilinxpd np->r_nextr = io_off + io_len; 40035f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 40045f4fc069Sjilinxpd } 40055f4fc069Sjilinxpd } 40065f4fc069Sjilinxpd } 40075f4fc069Sjilinxpd 40085f4fc069Sjilinxpd if (pl == NULL) 40095f4fc069Sjilinxpd return (error); 40105f4fc069Sjilinxpd 40115f4fc069Sjilinxpd if (error) { 40125f4fc069Sjilinxpd if (pp != NULL) 40135f4fc069Sjilinxpd pvn_read_done(pp, B_ERROR); 40145f4fc069Sjilinxpd return (error); 40155f4fc069Sjilinxpd } 40165f4fc069Sjilinxpd 40175f4fc069Sjilinxpd if (pagefound) { 40185f4fc069Sjilinxpd se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED); 40195f4fc069Sjilinxpd 40205f4fc069Sjilinxpd /* 40215f4fc069Sjilinxpd * Page exists in the cache, acquire the appropriate lock. 40225f4fc069Sjilinxpd * If this fails, start all over again. 
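 * (page_lookup() can return NULL here when the page found by
 * page_exists() above was freed or reclaimed by another thread
 * before we could lock it; in that case the DEBUG-only
 * smbfs_lostpage counter is bumped and we retry from "reread".)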
40235f4fc069Sjilinxpd */ 40245f4fc069Sjilinxpd if ((pp = page_lookup(vp, off, se)) == NULL) { 40255f4fc069Sjilinxpd #ifdef DEBUG 40265f4fc069Sjilinxpd smbfs_lostpage++; 40275f4fc069Sjilinxpd #endif 40285f4fc069Sjilinxpd goto reread; 40295f4fc069Sjilinxpd } 40305f4fc069Sjilinxpd pl[0] = pp; 40315f4fc069Sjilinxpd pl[1] = NULL; 40325f4fc069Sjilinxpd return (0); 40335f4fc069Sjilinxpd } 40345f4fc069Sjilinxpd 40355f4fc069Sjilinxpd if (pp != NULL) 40365f4fc069Sjilinxpd pvn_plist_init(pp, pl, plsz, off, io_len, rw); 40375f4fc069Sjilinxpd 40385f4fc069Sjilinxpd return (error); 40395f4fc069Sjilinxpd } 40405f4fc069Sjilinxpd 40415f4fc069Sjilinxpd /* 40425f4fc069Sjilinxpd * Here NFS has: nfs3_readahead 40435f4fc069Sjilinxpd * No read-ahead in smbfs yet. 40445f4fc069Sjilinxpd */ 40455f4fc069Sjilinxpd 4046*8329232eSGordon Ross #endif // _KERNEL 4047*8329232eSGordon Ross 40485f4fc069Sjilinxpd /* 40495f4fc069Sjilinxpd * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE} 40505f4fc069Sjilinxpd * If len == 0, do from off to EOF. 40515f4fc069Sjilinxpd * 40525f4fc069Sjilinxpd * The normal cases should be len == 0 && off == 0 (entire vp list), 40535f4fc069Sjilinxpd * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE 40545f4fc069Sjilinxpd * (from pageout). 40555f4fc069Sjilinxpd * 40565f4fc069Sjilinxpd * Like nfs3_putpage + nfs_putpages 40575f4fc069Sjilinxpd */ 40585f4fc069Sjilinxpd /* ARGSUSED */ 40595f4fc069Sjilinxpd static int 40605f4fc069Sjilinxpd smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, 40615f4fc069Sjilinxpd caller_context_t *ct) 40625f4fc069Sjilinxpd { 4063*8329232eSGordon Ross #ifdef _KERNEL 40645f4fc069Sjilinxpd smbnode_t *np; 40655f4fc069Sjilinxpd smbmntinfo_t *smi; 40665f4fc069Sjilinxpd page_t *pp; 40675f4fc069Sjilinxpd u_offset_t eoff; 40685f4fc069Sjilinxpd u_offset_t io_off; 40695f4fc069Sjilinxpd size_t io_len; 40705f4fc069Sjilinxpd int error; 40715f4fc069Sjilinxpd int rdirty; 40725f4fc069Sjilinxpd int err; 40735f4fc069Sjilinxpd 40745f4fc069Sjilinxpd np = VTOSMB(vp); 40755f4fc069Sjilinxpd smi = VTOSMI(vp); 40765f4fc069Sjilinxpd 40775f4fc069Sjilinxpd if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 40785f4fc069Sjilinxpd return (EIO); 40795f4fc069Sjilinxpd 40805f4fc069Sjilinxpd if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 40815f4fc069Sjilinxpd return (EIO); 40825f4fc069Sjilinxpd 40835f4fc069Sjilinxpd if (vp->v_flag & VNOMAP) 40845f4fc069Sjilinxpd return (ENOSYS); 40855f4fc069Sjilinxpd 40865f4fc069Sjilinxpd /* Here NFS does rp->r_count (++/--) stuff. */ 40875f4fc069Sjilinxpd 40885f4fc069Sjilinxpd /* Beginning of code from nfs_putpages. */ 40895f4fc069Sjilinxpd 40905f4fc069Sjilinxpd if (!vn_has_cached_data(vp)) 40915f4fc069Sjilinxpd return (0); 40925f4fc069Sjilinxpd 40935f4fc069Sjilinxpd /* 40945f4fc069Sjilinxpd * If ROUTOFSPACE is set, then all writes turn into B_INVAL 40955f4fc069Sjilinxpd * writes. B_FORCE is set to force the VM system to actually 40965f4fc069Sjilinxpd * invalidate the pages, even if the i/o failed. The pages 40975f4fc069Sjilinxpd * need to get invalidated because they can't be written out 40985f4fc069Sjilinxpd * because there isn't any space left on either the server's 40995f4fc069Sjilinxpd * file system or in the user's disk quota. The B_FREE bit 41005f4fc069Sjilinxpd * is cleared to avoid confusion as to whether this is a 41015f4fc069Sjilinxpd * request to place the page on the freelist or to destroy 41025f4fc069Sjilinxpd * it. 
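 * (ROUTOFSPACE is set and cleared below in smbfs_putapage(): set
 * when an over-the-wire write fails with ENOSPC, EDQUOT, EFBIG or
 * EACCES, and cleared again once a later write succeeds.)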
41035f4fc069Sjilinxpd */ 41045f4fc069Sjilinxpd if ((np->r_flags & ROUTOFSPACE) || 41055f4fc069Sjilinxpd (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)) 41065f4fc069Sjilinxpd flags = (flags & ~B_FREE) | B_INVAL | B_FORCE; 41075f4fc069Sjilinxpd 41085f4fc069Sjilinxpd if (len == 0) { 41095f4fc069Sjilinxpd /* 41105f4fc069Sjilinxpd * If doing a full file synchronous operation, then clear 41115f4fc069Sjilinxpd * the RDIRTY bit. If a page gets dirtied while the flush 41125f4fc069Sjilinxpd * is happening, then RDIRTY will get set again. The 41135f4fc069Sjilinxpd * RDIRTY bit must get cleared before the flush so that 41145f4fc069Sjilinxpd * we don't lose this information. 41155f4fc069Sjilinxpd * 41165f4fc069Sjilinxpd * NFS has B_ASYNC vs sync stuff here. 41175f4fc069Sjilinxpd */ 41185f4fc069Sjilinxpd if (off == (u_offset_t)0 && 41195f4fc069Sjilinxpd (np->r_flags & RDIRTY)) { 41205f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 41215f4fc069Sjilinxpd rdirty = (np->r_flags & RDIRTY); 41225f4fc069Sjilinxpd np->r_flags &= ~RDIRTY; 41235f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 41245f4fc069Sjilinxpd } else 41255f4fc069Sjilinxpd rdirty = 0; 41265f4fc069Sjilinxpd 41275f4fc069Sjilinxpd /* 41285f4fc069Sjilinxpd * Search the entire vp list for pages >= off, and flush 41295f4fc069Sjilinxpd * the dirty pages. 41305f4fc069Sjilinxpd */ 41315f4fc069Sjilinxpd error = pvn_vplist_dirty(vp, off, smbfs_putapage, 41325f4fc069Sjilinxpd flags, cr); 41335f4fc069Sjilinxpd 41345f4fc069Sjilinxpd /* 41355f4fc069Sjilinxpd * If an error occurred and the file was marked as dirty 41365f4fc069Sjilinxpd * before and we aren't forcibly invalidating pages, then 41375f4fc069Sjilinxpd * reset the RDIRTY flag. 41385f4fc069Sjilinxpd */ 41395f4fc069Sjilinxpd if (error && rdirty && 41405f4fc069Sjilinxpd (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) { 41415f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 41425f4fc069Sjilinxpd np->r_flags |= RDIRTY; 41435f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 41445f4fc069Sjilinxpd } 41455f4fc069Sjilinxpd } else { 41465f4fc069Sjilinxpd /* 41475f4fc069Sjilinxpd * Do a range from [off...off + len) looking for pages 41485f4fc069Sjilinxpd * to deal with. 41495f4fc069Sjilinxpd */ 41505f4fc069Sjilinxpd error = 0; 41515f4fc069Sjilinxpd io_len = 1; /* quiet warnings */ 41525f4fc069Sjilinxpd eoff = off + len; 41535f4fc069Sjilinxpd 41545f4fc069Sjilinxpd for (io_off = off; io_off < eoff; io_off += io_len) { 41555f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 41565f4fc069Sjilinxpd if (io_off >= np->r_size) { 41575f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 41585f4fc069Sjilinxpd break; 41595f4fc069Sjilinxpd } 41605f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 41615f4fc069Sjilinxpd /* 41625f4fc069Sjilinxpd * If we are not invalidating, synchronously 41635f4fc069Sjilinxpd * freeing or writing pages use the routine 41645f4fc069Sjilinxpd * page_lookup_nowait() to prevent reclaiming 41655f4fc069Sjilinxpd * them from the free list. 41665f4fc069Sjilinxpd */ 41675f4fc069Sjilinxpd if ((flags & B_INVAL) || !(flags & B_ASYNC)) { 41685f4fc069Sjilinxpd pp = page_lookup(vp, io_off, 41695f4fc069Sjilinxpd (flags & (B_INVAL | B_FREE)) ? 41705f4fc069Sjilinxpd SE_EXCL : SE_SHARED); 41715f4fc069Sjilinxpd } else { 41725f4fc069Sjilinxpd pp = page_lookup_nowait(vp, io_off, 41735f4fc069Sjilinxpd (flags & B_FREE) ? 
SE_EXCL : SE_SHARED); 41745f4fc069Sjilinxpd } 41755f4fc069Sjilinxpd 41765f4fc069Sjilinxpd if (pp == NULL || !pvn_getdirty(pp, flags)) 41775f4fc069Sjilinxpd io_len = PAGESIZE; 41785f4fc069Sjilinxpd else { 41795f4fc069Sjilinxpd err = smbfs_putapage(vp, pp, &io_off, 41805f4fc069Sjilinxpd &io_len, flags, cr); 41815f4fc069Sjilinxpd if (!error) 41825f4fc069Sjilinxpd error = err; 41835f4fc069Sjilinxpd /* 41845f4fc069Sjilinxpd * "io_off" and "io_len" are returned as 41855f4fc069Sjilinxpd * the range of pages we actually wrote. 41865f4fc069Sjilinxpd * This allows us to skip ahead more quickly 41875f4fc069Sjilinxpd * since several pages may've been dealt 41885f4fc069Sjilinxpd * with by this iteration of the loop. 41895f4fc069Sjilinxpd */ 41905f4fc069Sjilinxpd } 41915f4fc069Sjilinxpd } 41925f4fc069Sjilinxpd } 41935f4fc069Sjilinxpd 41945f4fc069Sjilinxpd return (error); 4195*8329232eSGordon Ross 4196*8329232eSGordon Ross #else // _KERNEL 4197*8329232eSGordon Ross return (ENOSYS); 4198*8329232eSGordon Ross #endif // _KERNEL 41995f4fc069Sjilinxpd } 42005f4fc069Sjilinxpd 4201*8329232eSGordon Ross #ifdef _KERNEL 4202*8329232eSGordon Ross 42035f4fc069Sjilinxpd /* 42045f4fc069Sjilinxpd * Write out a single page, possibly klustering adjacent dirty pages. 42055f4fc069Sjilinxpd * 42065f4fc069Sjilinxpd * Like nfs3_putapage / nfs3_sync_putapage 42075f4fc069Sjilinxpd */ 42085f4fc069Sjilinxpd static int 42095f4fc069Sjilinxpd smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp, 42105f4fc069Sjilinxpd int flags, cred_t *cr) 42115f4fc069Sjilinxpd { 42125f4fc069Sjilinxpd smbnode_t *np; 42135f4fc069Sjilinxpd u_offset_t io_off; 42145f4fc069Sjilinxpd u_offset_t lbn_off; 42155f4fc069Sjilinxpd u_offset_t lbn; 42165f4fc069Sjilinxpd size_t io_len; 42175f4fc069Sjilinxpd uint_t bsize; 42185f4fc069Sjilinxpd int error; 42195f4fc069Sjilinxpd 42205f4fc069Sjilinxpd np = VTOSMB(vp); 42215f4fc069Sjilinxpd 42225f4fc069Sjilinxpd ASSERT(!vn_is_readonly(vp)); 42235f4fc069Sjilinxpd 42245f4fc069Sjilinxpd bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE); 42255f4fc069Sjilinxpd lbn = pp->p_offset / bsize; 42265f4fc069Sjilinxpd lbn_off = lbn * bsize; 42275f4fc069Sjilinxpd 42285f4fc069Sjilinxpd /* 42295f4fc069Sjilinxpd * Find a kluster that fits in one block, or in 42305f4fc069Sjilinxpd * one page if pages are bigger than blocks. If 42315f4fc069Sjilinxpd * there is less file space allocated than a whole 42325f4fc069Sjilinxpd * page, we'll shorten the i/o request below. 42335f4fc069Sjilinxpd */ 42345f4fc069Sjilinxpd pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off, 42355f4fc069Sjilinxpd roundup(bsize, PAGESIZE), flags); 42365f4fc069Sjilinxpd 42375f4fc069Sjilinxpd /* 42385f4fc069Sjilinxpd * pvn_write_kluster shouldn't have returned a page with offset 42395f4fc069Sjilinxpd * behind the original page we were given. Verify that. 42405f4fc069Sjilinxpd */ 42415f4fc069Sjilinxpd ASSERT((pp->p_offset / bsize) >= lbn); 42425f4fc069Sjilinxpd 42435f4fc069Sjilinxpd /* 42445f4fc069Sjilinxpd * Now pp will have the list of kept dirty pages marked for 42455f4fc069Sjilinxpd * write back. It will also handle invalidation and freeing 42465f4fc069Sjilinxpd * of pages that are not dirty. Check for page length rounding 42475f4fc069Sjilinxpd * problems. 
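 * (That is, if the kluster extends past the end of this block,
 * io_len is trimmed below so the request stays within
 * [lbn_off, lbn_off + bsize).)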
42485f4fc069Sjilinxpd */ 42495f4fc069Sjilinxpd if (io_off + io_len > lbn_off + bsize) { 42505f4fc069Sjilinxpd ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE); 42515f4fc069Sjilinxpd io_len = lbn_off + bsize - io_off; 42525f4fc069Sjilinxpd } 42535f4fc069Sjilinxpd /* 42545f4fc069Sjilinxpd * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a 42555f4fc069Sjilinxpd * consistent value of r_size. RMODINPROGRESS is set in writerp(). 42565f4fc069Sjilinxpd * When RMODINPROGRESS is set it indicates that a uiomove() is in 42575f4fc069Sjilinxpd * progress and the r_size has not been made consistent with the 42585f4fc069Sjilinxpd * new size of the file. When the uiomove() completes the r_size is 42595f4fc069Sjilinxpd * updated and the RMODINPROGRESS flag is cleared. 42605f4fc069Sjilinxpd * 42615f4fc069Sjilinxpd * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a 42625f4fc069Sjilinxpd * consistent value of r_size. Without this handshaking, it is 42635f4fc069Sjilinxpd * possible that smbfs_bio() picks up the old value of r_size 42645f4fc069Sjilinxpd * before the uiomove() in writerp() completes. This will result 42655f4fc069Sjilinxpd * in the write through smbfs_bio() being dropped. 42665f4fc069Sjilinxpd * 42675f4fc069Sjilinxpd * More precisely, there is a window between the time the uiomove() 42685f4fc069Sjilinxpd * completes and the time the r_size is updated. If a VOP_PUTPAGE() 42695f4fc069Sjilinxpd * operation intervenes in this window, the page will be picked up, 42705f4fc069Sjilinxpd * because it is dirty (it will be unlocked, unless it was 42715f4fc069Sjilinxpd * pagecreate'd). When the page is picked up as dirty, the dirty 42725f4fc069Sjilinxpd * bit is reset (pvn_getdirty()). In smbfs_write(), r_size is 42735f4fc069Sjilinxpd * checked. This will still be the old size. Therefore the page will 42745f4fc069Sjilinxpd * not be written out. When segmap_release() calls VOP_PUTPAGE(), 42755f4fc069Sjilinxpd * the page will be found to be clean and the write will be dropped. 42765f4fc069Sjilinxpd */ 42775f4fc069Sjilinxpd if (np->r_flags & RMODINPROGRESS) { 42785f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 42795f4fc069Sjilinxpd if ((np->r_flags & RMODINPROGRESS) && 42805f4fc069Sjilinxpd np->r_modaddr + MAXBSIZE > io_off && 42815f4fc069Sjilinxpd np->r_modaddr < io_off + io_len) { 42825f4fc069Sjilinxpd page_t *plist; 42835f4fc069Sjilinxpd /* 42845f4fc069Sjilinxpd * A write is in progress for this region of the file. 42855f4fc069Sjilinxpd * If we did not detect RMODINPROGRESS here then this 42865f4fc069Sjilinxpd * path through smbfs_putapage() would eventually go to 42875f4fc069Sjilinxpd * smbfs_bio() and may not write out all of the data 42885f4fc069Sjilinxpd * in the pages. We end up losing data. So we decide 42895f4fc069Sjilinxpd * to set the modified bit on each page in the page 42905f4fc069Sjilinxpd * list and mark the rnode with RDIRTY. This write 42915f4fc069Sjilinxpd * will be restarted at some later time. 
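 * (The pages stay dirty because hat_setmod() is called on each one
 * below, and the RDIRTY flag on the rnode ensures a later putpage
 * pass will find and write them once the uiomove() has finished.)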
42925f4fc069Sjilinxpd */ 42935f4fc069Sjilinxpd plist = pp; 42945f4fc069Sjilinxpd while (plist != NULL) { 42955f4fc069Sjilinxpd pp = plist; 42965f4fc069Sjilinxpd page_sub(&plist, pp); 42975f4fc069Sjilinxpd hat_setmod(pp); 42985f4fc069Sjilinxpd page_io_unlock(pp); 42995f4fc069Sjilinxpd page_unlock(pp); 43005f4fc069Sjilinxpd } 43015f4fc069Sjilinxpd np->r_flags |= RDIRTY; 43025f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 43035f4fc069Sjilinxpd if (offp) 43045f4fc069Sjilinxpd *offp = io_off; 43055f4fc069Sjilinxpd if (lenp) 43065f4fc069Sjilinxpd *lenp = io_len; 43075f4fc069Sjilinxpd return (0); 43085f4fc069Sjilinxpd } 43095f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 43105f4fc069Sjilinxpd } 43115f4fc069Sjilinxpd 43125f4fc069Sjilinxpd /* 43135f4fc069Sjilinxpd * NFS handles (flags & B_ASYNC) here... 43145f4fc069Sjilinxpd * (See nfs_async_putapage()) 43155f4fc069Sjilinxpd * 43165f4fc069Sjilinxpd * This code section from: nfs3_sync_putapage() 43175f4fc069Sjilinxpd */ 43185f4fc069Sjilinxpd 43195f4fc069Sjilinxpd flags |= B_WRITE; 43205f4fc069Sjilinxpd 43215f4fc069Sjilinxpd error = smbfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr); 43225f4fc069Sjilinxpd 43235f4fc069Sjilinxpd if ((error == ENOSPC || error == EDQUOT || error == EFBIG || 43245f4fc069Sjilinxpd error == EACCES) && 43255f4fc069Sjilinxpd (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) { 43265f4fc069Sjilinxpd if (!(np->r_flags & ROUTOFSPACE)) { 43275f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 43285f4fc069Sjilinxpd np->r_flags |= ROUTOFSPACE; 43295f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 43305f4fc069Sjilinxpd } 43315f4fc069Sjilinxpd flags |= B_ERROR; 43325f4fc069Sjilinxpd pvn_write_done(pp, flags); 43335f4fc069Sjilinxpd /* 43345f4fc069Sjilinxpd * If this was not an async thread, then try again to 43355f4fc069Sjilinxpd * write out the pages, but this time, also destroy 43365f4fc069Sjilinxpd * them whether or not the write is successful. This 43375f4fc069Sjilinxpd * will prevent memory from filling up with these 43385f4fc069Sjilinxpd * pages and destroying them is the only alternative 43395f4fc069Sjilinxpd * if they can't be written out. 43405f4fc069Sjilinxpd * 43415f4fc069Sjilinxpd * Don't do this if this is an async thread because 43425f4fc069Sjilinxpd * when the pages are unlocked in pvn_write_done, 43435f4fc069Sjilinxpd * some other thread could have come along, locked 43445f4fc069Sjilinxpd * them, and queued for an async thread. It would be 43455f4fc069Sjilinxpd * possible for all of the async threads to be tied 43465f4fc069Sjilinxpd * up waiting to lock the pages again and they would 43475f4fc069Sjilinxpd * all already be locked and waiting for an async 43485f4fc069Sjilinxpd * thread to handle them. Deadlock. 
43495f4fc069Sjilinxpd */ 43505f4fc069Sjilinxpd if (!(flags & B_ASYNC)) { 43515f4fc069Sjilinxpd error = smbfs_putpage(vp, io_off, io_len, 43525f4fc069Sjilinxpd B_INVAL | B_FORCE, cr, NULL); 43535f4fc069Sjilinxpd } 43545f4fc069Sjilinxpd } else { 43555f4fc069Sjilinxpd if (error) 43565f4fc069Sjilinxpd flags |= B_ERROR; 43575f4fc069Sjilinxpd else if (np->r_flags & ROUTOFSPACE) { 43585f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 43595f4fc069Sjilinxpd np->r_flags &= ~ROUTOFSPACE; 43605f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 43615f4fc069Sjilinxpd } 43625f4fc069Sjilinxpd pvn_write_done(pp, flags); 43635f4fc069Sjilinxpd } 43645f4fc069Sjilinxpd 43655f4fc069Sjilinxpd /* Now more code from: nfs3_putapage */ 43665f4fc069Sjilinxpd 43675f4fc069Sjilinxpd if (offp) 43685f4fc069Sjilinxpd *offp = io_off; 43695f4fc069Sjilinxpd if (lenp) 43705f4fc069Sjilinxpd *lenp = io_len; 43715f4fc069Sjilinxpd 43725f4fc069Sjilinxpd return (error); 43735f4fc069Sjilinxpd } 43745f4fc069Sjilinxpd 4375*8329232eSGordon Ross #endif // _KERNEL 4376*8329232eSGordon Ross 4377*8329232eSGordon Ross 43785f4fc069Sjilinxpd /* 43795f4fc069Sjilinxpd * NFS has this in nfs_client.c (shared by v2,v3,...) 43805f4fc069Sjilinxpd * We have it here so smbfs_putapage can be file scope. 43815f4fc069Sjilinxpd */ 43825f4fc069Sjilinxpd void 43835f4fc069Sjilinxpd smbfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr) 43845f4fc069Sjilinxpd { 43855f4fc069Sjilinxpd smbnode_t *np; 43865f4fc069Sjilinxpd 43875f4fc069Sjilinxpd np = VTOSMB(vp); 43885f4fc069Sjilinxpd 43895f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 43905f4fc069Sjilinxpd while (np->r_flags & RTRUNCATE) 43915f4fc069Sjilinxpd cv_wait(&np->r_cv, &np->r_statelock); 43925f4fc069Sjilinxpd np->r_flags |= RTRUNCATE; 43935f4fc069Sjilinxpd 43945f4fc069Sjilinxpd if (off == (u_offset_t)0) { 43955f4fc069Sjilinxpd np->r_flags &= ~RDIRTY; 43965f4fc069Sjilinxpd if (!(np->r_flags & RSTALE)) 43975f4fc069Sjilinxpd np->r_error = 0; 43985f4fc069Sjilinxpd } 43995f4fc069Sjilinxpd /* Here NFSv3 has np->r_truncaddr = off; */ 44005f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 44015f4fc069Sjilinxpd 4402*8329232eSGordon Ross #ifdef _KERNEL 44035f4fc069Sjilinxpd (void) pvn_vplist_dirty(vp, off, smbfs_putapage, 44045f4fc069Sjilinxpd B_INVAL | B_TRUNC, cr); 4405*8329232eSGordon Ross #endif // _KERNEL 44065f4fc069Sjilinxpd 44075f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 44085f4fc069Sjilinxpd np->r_flags &= ~RTRUNCATE; 44095f4fc069Sjilinxpd cv_broadcast(&np->r_cv); 44105f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 44115f4fc069Sjilinxpd } 44125f4fc069Sjilinxpd 4413*8329232eSGordon Ross #ifdef _KERNEL 4414*8329232eSGordon Ross 44155f4fc069Sjilinxpd /* Like nfs3_map */ 44165f4fc069Sjilinxpd 44175f4fc069Sjilinxpd /* ARGSUSED */ 44185f4fc069Sjilinxpd static int 44195f4fc069Sjilinxpd smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp, 44205f4fc069Sjilinxpd size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, 44215f4fc069Sjilinxpd cred_t *cr, caller_context_t *ct) 44225f4fc069Sjilinxpd { 44235f4fc069Sjilinxpd segvn_crargs_t vn_a; 44245f4fc069Sjilinxpd struct vattr va; 44255f4fc069Sjilinxpd smbnode_t *np; 44265f4fc069Sjilinxpd smbmntinfo_t *smi; 44275f4fc069Sjilinxpd int error; 44285f4fc069Sjilinxpd 44295f4fc069Sjilinxpd np = VTOSMB(vp); 44305f4fc069Sjilinxpd smi = VTOSMI(vp); 44315f4fc069Sjilinxpd 44325f4fc069Sjilinxpd if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 44335f4fc069Sjilinxpd return (EIO); 44345f4fc069Sjilinxpd 44355f4fc069Sjilinxpd if (smi->smi_flags & SMI_DEAD || 
vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 44365f4fc069Sjilinxpd return (EIO); 44375f4fc069Sjilinxpd 44385f4fc069Sjilinxpd if (vp->v_flag & VNOMAP) 44395f4fc069Sjilinxpd return (ENOSYS); 44405f4fc069Sjilinxpd 44415f4fc069Sjilinxpd if (off < 0 || off + (ssize_t)len < 0) 44425f4fc069Sjilinxpd return (ENXIO); 44435f4fc069Sjilinxpd 44445f4fc069Sjilinxpd if (vp->v_type != VREG) 44455f4fc069Sjilinxpd return (ENODEV); 44465f4fc069Sjilinxpd 44475f4fc069Sjilinxpd /* 44485f4fc069Sjilinxpd * NFS does close-to-open consistency stuff here. 44495f4fc069Sjilinxpd * Just get (possibly cached) attributes. 44505f4fc069Sjilinxpd */ 44515f4fc069Sjilinxpd va.va_mask = AT_ALL; 44525f4fc069Sjilinxpd if ((error = smbfsgetattr(vp, &va, cr)) != 0) 44535f4fc069Sjilinxpd return (error); 44545f4fc069Sjilinxpd 44555f4fc069Sjilinxpd /* 44565f4fc069Sjilinxpd * Check to see if the vnode is currently marked as not cachable. 44575f4fc069Sjilinxpd * This means portions of the file are locked (through VOP_FRLOCK). 44585f4fc069Sjilinxpd * In this case the map request must be refused. We use 44595f4fc069Sjilinxpd * rp->r_lkserlock to avoid a race with concurrent lock requests. 44605f4fc069Sjilinxpd */ 44615f4fc069Sjilinxpd /* 44625f4fc069Sjilinxpd * Atomically increment r_inmap after acquiring r_rwlock. The 44635f4fc069Sjilinxpd * idea here is to acquire r_rwlock to block read/write and 44645f4fc069Sjilinxpd * not to protect r_inmap. r_inmap will inform smbfs_read/write() 44655f4fc069Sjilinxpd * that we are in smbfs_map(). Now, r_rwlock is acquired in order 44665f4fc069Sjilinxpd * and we can prevent the deadlock that would have occurred 44675f4fc069Sjilinxpd * when smbfs_addmap() would have acquired it out of order. 44685f4fc069Sjilinxpd * 44695f4fc069Sjilinxpd * Since we are not protecting r_inmap by any lock, we do not 44705f4fc069Sjilinxpd * hold any lock when we decrement it. We atomically decrement 44715f4fc069Sjilinxpd * r_inmap after we release r_lkserlock. Note that rwlock is 44725f4fc069Sjilinxpd * re-entered as writer in smbfs_addmap (called via as_map). 44735f4fc069Sjilinxpd */ 44745f4fc069Sjilinxpd 44755f4fc069Sjilinxpd if (smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, SMBINTR(vp))) 44765f4fc069Sjilinxpd return (EINTR); 44775f4fc069Sjilinxpd atomic_inc_uint(&np->r_inmap); 44785f4fc069Sjilinxpd smbfs_rw_exit(&np->r_rwlock); 44795f4fc069Sjilinxpd 44805f4fc069Sjilinxpd if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp))) { 44815f4fc069Sjilinxpd atomic_dec_uint(&np->r_inmap); 44825f4fc069Sjilinxpd return (EINTR); 44835f4fc069Sjilinxpd } 44845f4fc069Sjilinxpd 44855f4fc069Sjilinxpd if (vp->v_flag & VNOCACHE) { 44865f4fc069Sjilinxpd error = EAGAIN; 44875f4fc069Sjilinxpd goto done; 44885f4fc069Sjilinxpd } 44895f4fc069Sjilinxpd 44905f4fc069Sjilinxpd /* 44915f4fc069Sjilinxpd * Don't allow concurrent locks and mapping if mandatory locking is 44925f4fc069Sjilinxpd * enabled. 
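 * (MANDLOCK() is true for a regular file whose mode has the setgid
 * bit set with group-execute clear, the System V convention that
 * marks a file for mandatory locking.)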
44935f4fc069Sjilinxpd */
44945f4fc069Sjilinxpd if ((flk_has_remote_locks(vp) || smbfs_lm_has_sleep(vp)) &&
44955f4fc069Sjilinxpd MANDLOCK(vp, va.va_mode)) {
44965f4fc069Sjilinxpd error = EAGAIN;
44975f4fc069Sjilinxpd goto done;
44985f4fc069Sjilinxpd }
44995f4fc069Sjilinxpd 
45005f4fc069Sjilinxpd as_rangelock(as);
45015f4fc069Sjilinxpd error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
45025f4fc069Sjilinxpd if (error != 0) {
45035f4fc069Sjilinxpd as_rangeunlock(as);
45045f4fc069Sjilinxpd goto done;
45055f4fc069Sjilinxpd }
45065f4fc069Sjilinxpd 
45075f4fc069Sjilinxpd vn_a.vp = vp;
45085f4fc069Sjilinxpd vn_a.offset = off;
45095f4fc069Sjilinxpd vn_a.type = (flags & MAP_TYPE);
45105f4fc069Sjilinxpd vn_a.prot = (uchar_t)prot;
45115f4fc069Sjilinxpd vn_a.maxprot = (uchar_t)maxprot;
45125f4fc069Sjilinxpd vn_a.flags = (flags & ~MAP_TYPE);
45135f4fc069Sjilinxpd vn_a.cred = cr;
45145f4fc069Sjilinxpd vn_a.amp = NULL;
45155f4fc069Sjilinxpd vn_a.szc = 0;
45165f4fc069Sjilinxpd vn_a.lgrp_mem_policy_flags = 0;
45175f4fc069Sjilinxpd 
45185f4fc069Sjilinxpd error = as_map(as, *addrp, len, segvn_create, &vn_a);
45195f4fc069Sjilinxpd as_rangeunlock(as);
45205f4fc069Sjilinxpd 
45215f4fc069Sjilinxpd done:
45225f4fc069Sjilinxpd smbfs_rw_exit(&np->r_lkserlock);
45235f4fc069Sjilinxpd atomic_dec_uint(&np->r_inmap);
45245f4fc069Sjilinxpd return (error);
45255f4fc069Sjilinxpd }
45265f4fc069Sjilinxpd 
45274e72ade1SGordon Ross /*
45284e72ade1SGordon Ross * This uses addmap/delmap functions to hold the SMB FID open as long as
45294e72ade1SGordon Ross * there are pages mapped in this as/seg. Increment the FID refs when
45304e72ade1SGordon Ross * the mapping count goes from zero to non-zero, and release the FID ref
45314e72ade1SGordon Ross * when the mapping count goes from non-zero to zero.
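 * For example, if the same file is mmap()'d twice, both mappings
 * share the single FID reference taken when r_mapcnt first became
 * non-zero; the over-the-wire close is deferred until the last of
 * those pages has been unmapped.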
45324e72ade1SGordon Ross */
45334e72ade1SGordon Ross 
45345f4fc069Sjilinxpd /* ARGSUSED */
45355f4fc069Sjilinxpd static int
45365f4fc069Sjilinxpd smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
45375f4fc069Sjilinxpd size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
45385f4fc069Sjilinxpd cred_t *cr, caller_context_t *ct)
45395f4fc069Sjilinxpd {
45405f4fc069Sjilinxpd smbnode_t *np = VTOSMB(vp);
45415f4fc069Sjilinxpd boolean_t inc_fidrefs = B_FALSE;
45425f4fc069Sjilinxpd 
45435f4fc069Sjilinxpd /*
45445f4fc069Sjilinxpd * When r_mapcnt goes from zero to non-zero,
45455f4fc069Sjilinxpd * increment n_fidrefs
45465f4fc069Sjilinxpd */
45475f4fc069Sjilinxpd mutex_enter(&np->r_statelock);
45485f4fc069Sjilinxpd if (np->r_mapcnt == 0)
45495f4fc069Sjilinxpd inc_fidrefs = B_TRUE;
45505f4fc069Sjilinxpd np->r_mapcnt += btopr(len);
45515f4fc069Sjilinxpd mutex_exit(&np->r_statelock);
45525f4fc069Sjilinxpd 
45535f4fc069Sjilinxpd if (inc_fidrefs) {
45545f4fc069Sjilinxpd (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
45555f4fc069Sjilinxpd np->n_fidrefs++;
45565f4fc069Sjilinxpd smbfs_rw_exit(&np->r_lkserlock);
45575f4fc069Sjilinxpd }
45585f4fc069Sjilinxpd 
45595f4fc069Sjilinxpd return (0);
45605f4fc069Sjilinxpd }
45615f4fc069Sjilinxpd 
45625f4fc069Sjilinxpd /*
45634e72ade1SGordon Ross * Args passed to smbfs_delmap_async
45645f4fc069Sjilinxpd */
45655f4fc069Sjilinxpd typedef struct smbfs_delmap_args {
45664e72ade1SGordon Ross taskq_ent_t dm_tqent;
45674e72ade1SGordon Ross cred_t *dm_cr;
45684e72ade1SGordon Ross vnode_t *dm_vp;
45694e72ade1SGordon Ross offset_t dm_off;
45704e72ade1SGordon Ross caddr_t dm_addr;
45714e72ade1SGordon Ross size_t dm_len;
45724e72ade1SGordon Ross uint_t dm_prot;
45734e72ade1SGordon Ross uint_t dm_maxprot;
45744e72ade1SGordon Ross uint_t dm_flags;
45754e72ade1SGordon Ross boolean_t dm_rele_fid;
45765f4fc069Sjilinxpd } smbfs_delmap_args_t;
45775f4fc069Sjilinxpd 
45784e72ade1SGordon Ross /*
45794e72ade1SGordon Ross * Using delmap not only to release the SMB FID (as described above)
45804e72ade1SGordon Ross * but also to flush dirty pages as needed. Both of those do the actual
45814e72ade1SGordon Ross * work in an async taskq job to avoid interfering with locks held
45824e72ade1SGordon Ross * in the VM layer when this is called.
45834e72ade1SGordon Ross */
45844e72ade1SGordon Ross 
45855f4fc069Sjilinxpd /* ARGSUSED */
45865f4fc069Sjilinxpd static int
45875f4fc069Sjilinxpd smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
45885f4fc069Sjilinxpd size_t len, uint_t prot, uint_t maxprot, uint_t flags,
45895f4fc069Sjilinxpd cred_t *cr, caller_context_t *ct)
45905f4fc069Sjilinxpd {
45914e72ade1SGordon Ross smbnode_t *np = VTOSMB(vp);
45924e72ade1SGordon Ross smbmntinfo_t *smi = VTOSMI(vp);
45935f4fc069Sjilinxpd smbfs_delmap_args_t *dmapp;
45945f4fc069Sjilinxpd 
45955f4fc069Sjilinxpd dmapp = kmem_zalloc(sizeof (*dmapp), KM_SLEEP);
45965f4fc069Sjilinxpd 
45974e72ade1SGordon Ross /*
45984e72ade1SGordon Ross * The VM layer may segvn_free the seg holding this vnode
45994e72ade1SGordon Ross * before our callback has a chance to run, so take a hold on
46004e72ade1SGordon Ross * the vnode here and release it in the callback.
46014e72ade1SGordon Ross * (same for the cred) 46024e72ade1SGordon Ross */ 46034e72ade1SGordon Ross crhold(cr); 46044e72ade1SGordon Ross VN_HOLD(vp); 46054e72ade1SGordon Ross 46064e72ade1SGordon Ross dmapp->dm_vp = vp; 46074e72ade1SGordon Ross dmapp->dm_cr = cr; 46084e72ade1SGordon Ross dmapp->dm_off = off; 46094e72ade1SGordon Ross dmapp->dm_addr = addr; 46104e72ade1SGordon Ross dmapp->dm_len = len; 46114e72ade1SGordon Ross dmapp->dm_prot = prot; 46124e72ade1SGordon Ross dmapp->dm_maxprot = maxprot; 46134e72ade1SGordon Ross dmapp->dm_flags = flags; 46144e72ade1SGordon Ross dmapp->dm_rele_fid = B_FALSE; 46155f4fc069Sjilinxpd 46165f4fc069Sjilinxpd /* 46174e72ade1SGordon Ross * Go ahead and decrement r_mapcount now, which is 46184e72ade1SGordon Ross * the primary purpose of this function. 46194e72ade1SGordon Ross * 46204e72ade1SGordon Ross * When r_mapcnt goes to zero, we need to call 46214e72ade1SGordon Ross * smbfs_rele_fid, but can't do that here, so 46224e72ade1SGordon Ross * set a flag telling the async task to do it. 46235f4fc069Sjilinxpd */ 46245f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 46255f4fc069Sjilinxpd np->r_mapcnt -= btopr(len); 46265f4fc069Sjilinxpd ASSERT(np->r_mapcnt >= 0); 46275f4fc069Sjilinxpd if (np->r_mapcnt == 0) 46284e72ade1SGordon Ross dmapp->dm_rele_fid = B_TRUE; 46295f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 46305f4fc069Sjilinxpd 46314e72ade1SGordon Ross taskq_dispatch_ent(smi->smi_taskq, smbfs_delmap_async, dmapp, 0, 46324e72ade1SGordon Ross &dmapp->dm_tqent); 46335f4fc069Sjilinxpd 46345f4fc069Sjilinxpd return (0); 46355f4fc069Sjilinxpd } 46365f4fc069Sjilinxpd 46375f4fc069Sjilinxpd /* 46385f4fc069Sjilinxpd * Remove some pages from an mmap'd vnode. Flush any 46395f4fc069Sjilinxpd * dirty pages in the unmapped range. 46405f4fc069Sjilinxpd */ 46415f4fc069Sjilinxpd /* ARGSUSED */ 46425f4fc069Sjilinxpd static void 46434e72ade1SGordon Ross smbfs_delmap_async(void *varg) 46445f4fc069Sjilinxpd { 46454e72ade1SGordon Ross smbfs_delmap_args_t *dmapp = varg; 46464e72ade1SGordon Ross cred_t *cr; 46475f4fc069Sjilinxpd vnode_t *vp; 46485f4fc069Sjilinxpd smbnode_t *np; 46495f4fc069Sjilinxpd smbmntinfo_t *smi; 46505f4fc069Sjilinxpd 46514e72ade1SGordon Ross cr = dmapp->dm_cr; 46524e72ade1SGordon Ross vp = dmapp->dm_vp; 46535f4fc069Sjilinxpd np = VTOSMB(vp); 46545f4fc069Sjilinxpd smi = VTOSMI(vp); 46555f4fc069Sjilinxpd 46565f4fc069Sjilinxpd /* Decremented r_mapcnt in smbfs_delmap */ 46575f4fc069Sjilinxpd 46585f4fc069Sjilinxpd /* 46595f4fc069Sjilinxpd * Initiate a page flush and potential commit if there are 46605f4fc069Sjilinxpd * pages, the file system was not mounted readonly, the segment 46615f4fc069Sjilinxpd * was mapped shared, and the pages themselves were writeable. 46625f4fc069Sjilinxpd * 46635f4fc069Sjilinxpd * mark RDIRTY here, will be used to check if a file is dirty when 46645f4fc069Sjilinxpd * unmount smbfs 46655f4fc069Sjilinxpd */ 46665f4fc069Sjilinxpd if (vn_has_cached_data(vp) && !vn_is_readonly(vp) && 46674e72ade1SGordon Ross dmapp->dm_flags == MAP_SHARED && 46684e72ade1SGordon Ross (dmapp->dm_maxprot & PROT_WRITE) != 0) { 46695f4fc069Sjilinxpd mutex_enter(&np->r_statelock); 46705f4fc069Sjilinxpd np->r_flags |= RDIRTY; 46715f4fc069Sjilinxpd mutex_exit(&np->r_statelock); 46725f4fc069Sjilinxpd 46735f4fc069Sjilinxpd /* 46745f4fc069Sjilinxpd * Need to finish the putpage before we 46755f4fc069Sjilinxpd * close the OtW FID needed for I/O. 
46765f4fc069Sjilinxpd */ 46774e72ade1SGordon Ross (void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len, 0, 46784e72ade1SGordon Ross dmapp->dm_cr, NULL); 46795f4fc069Sjilinxpd } 46805f4fc069Sjilinxpd 46815f4fc069Sjilinxpd if ((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) 46824e72ade1SGordon Ross (void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len, 46834e72ade1SGordon Ross B_INVAL, dmapp->dm_cr, NULL); 46845f4fc069Sjilinxpd 46855f4fc069Sjilinxpd /* 46865f4fc069Sjilinxpd * If r_mapcnt went to zero, drop our FID ref now. 46875f4fc069Sjilinxpd * On the last fidref, this does an OtW close. 46885f4fc069Sjilinxpd */ 46894e72ade1SGordon Ross if (dmapp->dm_rele_fid) { 46905f4fc069Sjilinxpd struct smb_cred scred; 46915f4fc069Sjilinxpd 46925f4fc069Sjilinxpd (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0); 46934e72ade1SGordon Ross smb_credinit(&scred, dmapp->dm_cr); 46945f4fc069Sjilinxpd 46955f4fc069Sjilinxpd smbfs_rele_fid(np, &scred); 46965f4fc069Sjilinxpd 46975f4fc069Sjilinxpd smb_credrele(&scred); 46985f4fc069Sjilinxpd smbfs_rw_exit(&np->r_lkserlock); 46995f4fc069Sjilinxpd } 47005f4fc069Sjilinxpd 47014e72ade1SGordon Ross /* Release holds taken in smbfs_delmap */ 47024e72ade1SGordon Ross VN_RELE(vp); 47034e72ade1SGordon Ross crfree(cr); 47044e72ade1SGordon Ross 47055f4fc069Sjilinxpd kmem_free(dmapp, sizeof (*dmapp)); 47065f4fc069Sjilinxpd } 47075f4fc069Sjilinxpd 47085f4fc069Sjilinxpd /* No smbfs_pageio() or smbfs_dispose() ops. */ 47095f4fc069Sjilinxpd 4710*8329232eSGordon Ross #endif // _KERNEL 4711*8329232eSGordon Ross 47125f4fc069Sjilinxpd /* misc. ******************************************************** */ 47135f4fc069Sjilinxpd 47144bff34e3Sthurlow 47154bff34e3Sthurlow /* 47164bff34e3Sthurlow * XXX 47174bff34e3Sthurlow * This op may need to support PSARC 2007/440, nbmand changes for CIFS Service. 47184bff34e3Sthurlow */ 47194bff34e3Sthurlow static int 47204bff34e3Sthurlow smbfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, 47214bff34e3Sthurlow offset_t offset, struct flk_callback *flk_cbp, cred_t *cr, 47224bff34e3Sthurlow caller_context_t *ct) 47234bff34e3Sthurlow { 4724a19609f8Sjv if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone) 47254bff34e3Sthurlow return (EIO); 47264bff34e3Sthurlow 47274bff34e3Sthurlow if (VTOSMI(vp)->smi_flags & SMI_LLOCK) 47284bff34e3Sthurlow return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct)); 47294bff34e3Sthurlow else 47304bff34e3Sthurlow return (ENOSYS); 47314bff34e3Sthurlow } 47324bff34e3Sthurlow 47334bff34e3Sthurlow /* 47344bff34e3Sthurlow * Free storage space associated with the specified vnode. The portion 47354bff34e3Sthurlow * to be freed is specified by bfp->l_start and bfp->l_len (already 47364bff34e3Sthurlow * normalized to a "whence" of 0). 47374bff34e3Sthurlow * 47384bff34e3Sthurlow * Called by fcntl(fd, F_FREESP, lkp) for libc:ftruncate, etc. 
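 * (For example, ftruncate(fd, len) arrives here as F_FREESP with
 * l_start == len and l_len == 0; only that l_len == 0 "truncate"
 * form is accepted below.)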
47394bff34e3Sthurlow */ 47404bff34e3Sthurlow /* ARGSUSED */ 47414bff34e3Sthurlow static int 47424bff34e3Sthurlow smbfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, 47434bff34e3Sthurlow offset_t offset, cred_t *cr, caller_context_t *ct) 47444bff34e3Sthurlow { 47454bff34e3Sthurlow int error; 47464bff34e3Sthurlow smbmntinfo_t *smi; 47474bff34e3Sthurlow 47484bff34e3Sthurlow smi = VTOSMI(vp); 47494bff34e3Sthurlow 4750a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 47514bff34e3Sthurlow return (EIO); 47524bff34e3Sthurlow 47534bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 47544bff34e3Sthurlow return (EIO); 47554bff34e3Sthurlow 475691d632c8Sgwr /* Caller (fcntl) has checked v_type */ 47574bff34e3Sthurlow ASSERT(vp->v_type == VREG); 47584bff34e3Sthurlow if (cmd != F_FREESP) 47594bff34e3Sthurlow return (EINVAL); 47604bff34e3Sthurlow 47614bff34e3Sthurlow /* 47624bff34e3Sthurlow * Like NFS3, no 32-bit offset checks here. 47634bff34e3Sthurlow * Our SMB layer takes care to return EFBIG 47644bff34e3Sthurlow * when it has to fallback to a 32-bit call. 47654bff34e3Sthurlow */ 47664bff34e3Sthurlow 47674bff34e3Sthurlow error = convoff(vp, bfp, 0, offset); 47684bff34e3Sthurlow if (!error) { 47694bff34e3Sthurlow ASSERT(bfp->l_start >= 0); 47704bff34e3Sthurlow if (bfp->l_len == 0) { 47714bff34e3Sthurlow struct vattr va; 47724bff34e3Sthurlow 47734bff34e3Sthurlow /* 47744bff34e3Sthurlow * ftruncate should not change the ctime and 47754bff34e3Sthurlow * mtime if we truncate the file to its 47764bff34e3Sthurlow * previous size. 47774bff34e3Sthurlow */ 47784bff34e3Sthurlow va.va_mask = AT_SIZE; 47794bff34e3Sthurlow error = smbfsgetattr(vp, &va, cr); 47804bff34e3Sthurlow if (error || va.va_size == bfp->l_start) 47814bff34e3Sthurlow return (error); 47824bff34e3Sthurlow va.va_mask = AT_SIZE; 47834bff34e3Sthurlow va.va_size = bfp->l_start; 47844bff34e3Sthurlow error = smbfssetattr(vp, &va, 0, cr); 47855f4fc069Sjilinxpd /* SMBFS_VNEVENT... 
*/ 47864bff34e3Sthurlow } else 47874bff34e3Sthurlow error = EINVAL; 47884bff34e3Sthurlow } 47894bff34e3Sthurlow 47904bff34e3Sthurlow return (error); 47914bff34e3Sthurlow } 47924bff34e3Sthurlow 47935f4fc069Sjilinxpd 47945f4fc069Sjilinxpd /* ARGSUSED */ 47955f4fc069Sjilinxpd static int 47965f4fc069Sjilinxpd smbfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct) 47975f4fc069Sjilinxpd { 47985f4fc069Sjilinxpd 47995f4fc069Sjilinxpd return (ENOSYS); 48005f4fc069Sjilinxpd } 48015f4fc069Sjilinxpd 48025f4fc069Sjilinxpd 48034bff34e3Sthurlow /* ARGSUSED */ 48044bff34e3Sthurlow static int 48054bff34e3Sthurlow smbfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr, 48064bff34e3Sthurlow caller_context_t *ct) 48074bff34e3Sthurlow { 480891d632c8Sgwr vfs_t *vfs; 48094bff34e3Sthurlow smbmntinfo_t *smi; 48104bff34e3Sthurlow struct smb_share *ssp; 48114bff34e3Sthurlow 481291d632c8Sgwr vfs = vp->v_vfsp; 481391d632c8Sgwr smi = VFTOSMI(vfs); 48144bff34e3Sthurlow 4815a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 48164bff34e3Sthurlow return (EIO); 48174bff34e3Sthurlow 48184bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 48194bff34e3Sthurlow return (EIO); 48204bff34e3Sthurlow 48214bff34e3Sthurlow switch (cmd) { 48224bff34e3Sthurlow case _PC_FILESIZEBITS: 48234bff34e3Sthurlow ssp = smi->smi_share; 48244bff34e3Sthurlow if (SSTOVC(ssp)->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES) 48254bff34e3Sthurlow *valp = 64; 48264bff34e3Sthurlow else 48274bff34e3Sthurlow *valp = 32; 48284bff34e3Sthurlow break; 48294bff34e3Sthurlow 48304bff34e3Sthurlow case _PC_LINK_MAX: 48314bff34e3Sthurlow /* We only ever report one link to an object */ 48324bff34e3Sthurlow *valp = 1; 48334bff34e3Sthurlow break; 48344bff34e3Sthurlow 48357568150aSgwr case _PC_ACL_ENABLED: 48367568150aSgwr /* 483702d09e03SGordon Ross * Always indicate that ACLs are enabled and 483802d09e03SGordon Ross * that we support ACE_T format, otherwise 483902d09e03SGordon Ross * libsec will ask for ACLENT_T format data 484002d09e03SGordon Ross * which we don't support. 48417568150aSgwr */ 48427568150aSgwr *valp = _ACL_ACE_ENABLED; 48437568150aSgwr break; 48447568150aSgwr 48454bff34e3Sthurlow case _PC_SYMLINK_MAX: /* No symlinks until we do Unix extensions */ 48464bff34e3Sthurlow *valp = 0; 48474bff34e3Sthurlow break; 48484bff34e3Sthurlow 484991d632c8Sgwr case _PC_XATTR_EXISTS: 485091d632c8Sgwr if (vfs->vfs_flag & VFS_XATTR) { 485191d632c8Sgwr *valp = smbfs_xa_exists(vp, cr); 485291d632c8Sgwr break; 485391d632c8Sgwr } 485491d632c8Sgwr return (EINVAL); 485591d632c8Sgwr 485628162916SGordon Ross case _PC_SATTR_ENABLED: 485728162916SGordon Ross case _PC_SATTR_EXISTS: 485828162916SGordon Ross *valp = 1; 485928162916SGordon Ross break; 486028162916SGordon Ross 48613b862e9aSRoger A. Faulkner case _PC_TIMESTAMP_RESOLUTION: 486202d09e03SGordon Ross /* 486302d09e03SGordon Ross * Windows times are tenths of microseconds 486402d09e03SGordon Ross * (multiples of 100 nanoseconds). 486502d09e03SGordon Ross */ 486602d09e03SGordon Ross *valp = 100L; 48673b862e9aSRoger A. Faulkner break; 48683b862e9aSRoger A. 
Faulkner 48694bff34e3Sthurlow default: 48704bff34e3Sthurlow return (fs_pathconf(vp, cmd, valp, cr, ct)); 48714bff34e3Sthurlow } 48724bff34e3Sthurlow return (0); 48734bff34e3Sthurlow } 48744bff34e3Sthurlow 48757568150aSgwr /* ARGSUSED */ 48767568150aSgwr static int 48777568150aSgwr smbfs_getsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr, 48787568150aSgwr caller_context_t *ct) 48797568150aSgwr { 48807568150aSgwr vfs_t *vfsp; 48817568150aSgwr smbmntinfo_t *smi; 488202d09e03SGordon Ross int error; 48837568150aSgwr uint_t mask; 48847568150aSgwr 48857568150aSgwr vfsp = vp->v_vfsp; 48867568150aSgwr smi = VFTOSMI(vfsp); 48877568150aSgwr 4888a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 48897568150aSgwr return (EIO); 48907568150aSgwr 48917568150aSgwr if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED) 48927568150aSgwr return (EIO); 48937568150aSgwr 48947568150aSgwr /* 48957568150aSgwr * Our _pathconf indicates _ACL_ACE_ENABLED, 48967568150aSgwr * so we should only see VSA_ACE, etc here. 48977568150aSgwr * Note: vn_create asks for VSA_DFACLCNT, 48987568150aSgwr * and it expects ENOSYS and empty data. 48997568150aSgwr */ 49007568150aSgwr mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT | 49017568150aSgwr VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES); 49027568150aSgwr if (mask == 0) 49037568150aSgwr return (ENOSYS); 49047568150aSgwr 490502d09e03SGordon Ross if (smi->smi_flags & SMI_ACL) 4906bd7c6f51SGordon Ross error = smbfs_acl_getvsa(vp, vsa, flag, cr); 490702d09e03SGordon Ross else 49087568150aSgwr error = ENOSYS; 49097568150aSgwr 49107568150aSgwr if (error == ENOSYS) 49117568150aSgwr error = fs_fab_acl(vp, vsa, flag, cr, ct); 49127568150aSgwr 49137568150aSgwr return (error); 49147568150aSgwr } 49157568150aSgwr 49167568150aSgwr /* ARGSUSED */ 49177568150aSgwr static int 49187568150aSgwr smbfs_setsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr, 49197568150aSgwr caller_context_t *ct) 49207568150aSgwr { 49217568150aSgwr vfs_t *vfsp; 49227568150aSgwr smbmntinfo_t *smi; 49237568150aSgwr int error; 49247568150aSgwr uint_t mask; 49257568150aSgwr 49267568150aSgwr vfsp = vp->v_vfsp; 49277568150aSgwr smi = VFTOSMI(vfsp); 49287568150aSgwr 4929a19609f8Sjv if (curproc->p_zone != smi->smi_zone_ref.zref_zone) 49307568150aSgwr return (EIO); 49317568150aSgwr 49327568150aSgwr if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED) 49337568150aSgwr return (EIO); 49347568150aSgwr 49357568150aSgwr /* 49367568150aSgwr * Our _pathconf indicates _ACL_ACE_ENABLED, 49377568150aSgwr * so we should only see VSA_ACE, etc here. 49387568150aSgwr */ 49397568150aSgwr mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT); 49407568150aSgwr if (mask == 0) 49417568150aSgwr return (ENOSYS); 49427568150aSgwr 49437568150aSgwr if (vfsp->vfs_flag & VFS_RDONLY) 49447568150aSgwr return (EROFS); 49457568150aSgwr 494602d09e03SGordon Ross /* 494702d09e03SGordon Ross * Allow only the mount owner to do this. 494802d09e03SGordon Ross * See comments at smbfs_access_rwx. 
494902d09e03SGordon Ross */ 495002d09e03SGordon Ross error = secpolicy_vnode_setdac(cr, smi->smi_uid); 495102d09e03SGordon Ross if (error != 0) 495202d09e03SGordon Ross return (error); 495302d09e03SGordon Ross 495402d09e03SGordon Ross if (smi->smi_flags & SMI_ACL) 4955bd7c6f51SGordon Ross error = smbfs_acl_setvsa(vp, vsa, flag, cr); 495602d09e03SGordon Ross else 49577568150aSgwr error = ENOSYS; 49587568150aSgwr 49597568150aSgwr return (error); 49607568150aSgwr } 49614bff34e3Sthurlow 49624bff34e3Sthurlow 49634bff34e3Sthurlow /* 49644bff34e3Sthurlow * XXX 49654bff34e3Sthurlow * This op should eventually support PSARC 2007/268. 49664bff34e3Sthurlow */ 49674bff34e3Sthurlow static int 49684bff34e3Sthurlow smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr, 49694bff34e3Sthurlow caller_context_t *ct) 49704bff34e3Sthurlow { 4971a19609f8Sjv if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone) 49724bff34e3Sthurlow return (EIO); 49734bff34e3Sthurlow 49744bff34e3Sthurlow if (VTOSMI(vp)->smi_flags & SMI_LLOCK) 49754bff34e3Sthurlow return (fs_shrlock(vp, cmd, shr, flag, cr, ct)); 49764bff34e3Sthurlow else 49774bff34e3Sthurlow return (ENOSYS); 49784bff34e3Sthurlow } 49795f4fc069Sjilinxpd 49805f4fc069Sjilinxpd 49815f4fc069Sjilinxpd /* 49825f4fc069Sjilinxpd * Most unimplemented ops will return ENOSYS because of fs_nosys(). 49835f4fc069Sjilinxpd * The only ops where that won't work are ACCESS (due to open(2) 49845f4fc069Sjilinxpd * failures) and ... (anything else left?) 49855f4fc069Sjilinxpd */ 49865f4fc069Sjilinxpd const fs_operation_def_t smbfs_vnodeops_template[] = { 49875f4fc069Sjilinxpd VOPNAME_OPEN, { .vop_open = smbfs_open }, 49885f4fc069Sjilinxpd VOPNAME_CLOSE, { .vop_close = smbfs_close }, 49895f4fc069Sjilinxpd VOPNAME_READ, { .vop_read = smbfs_read }, 49905f4fc069Sjilinxpd VOPNAME_WRITE, { .vop_write = smbfs_write }, 49915f4fc069Sjilinxpd VOPNAME_IOCTL, { .vop_ioctl = smbfs_ioctl }, 49925f4fc069Sjilinxpd VOPNAME_GETATTR, { .vop_getattr = smbfs_getattr }, 49935f4fc069Sjilinxpd VOPNAME_SETATTR, { .vop_setattr = smbfs_setattr }, 49945f4fc069Sjilinxpd VOPNAME_ACCESS, { .vop_access = smbfs_access }, 49955f4fc069Sjilinxpd VOPNAME_LOOKUP, { .vop_lookup = smbfs_lookup }, 49965f4fc069Sjilinxpd VOPNAME_CREATE, { .vop_create = smbfs_create }, 49975f4fc069Sjilinxpd VOPNAME_REMOVE, { .vop_remove = smbfs_remove }, 49985f4fc069Sjilinxpd VOPNAME_LINK, { .vop_link = smbfs_link }, 49995f4fc069Sjilinxpd VOPNAME_RENAME, { .vop_rename = smbfs_rename }, 50005f4fc069Sjilinxpd VOPNAME_MKDIR, { .vop_mkdir = smbfs_mkdir }, 50015f4fc069Sjilinxpd VOPNAME_RMDIR, { .vop_rmdir = smbfs_rmdir }, 50025f4fc069Sjilinxpd VOPNAME_READDIR, { .vop_readdir = smbfs_readdir }, 50035f4fc069Sjilinxpd VOPNAME_SYMLINK, { .vop_symlink = smbfs_symlink }, 50045f4fc069Sjilinxpd VOPNAME_READLINK, { .vop_readlink = smbfs_readlink }, 50055f4fc069Sjilinxpd VOPNAME_FSYNC, { .vop_fsync = smbfs_fsync }, 50065f4fc069Sjilinxpd VOPNAME_INACTIVE, { .vop_inactive = smbfs_inactive }, 50075f4fc069Sjilinxpd VOPNAME_FID, { .vop_fid = smbfs_fid }, 50085f4fc069Sjilinxpd VOPNAME_RWLOCK, { .vop_rwlock = smbfs_rwlock }, 50095f4fc069Sjilinxpd VOPNAME_RWUNLOCK, { .vop_rwunlock = smbfs_rwunlock }, 50105f4fc069Sjilinxpd VOPNAME_SEEK, { .vop_seek = smbfs_seek }, 50115f4fc069Sjilinxpd VOPNAME_FRLOCK, { .vop_frlock = smbfs_frlock }, 50125f4fc069Sjilinxpd VOPNAME_SPACE, { .vop_space = smbfs_space }, 50135f4fc069Sjilinxpd VOPNAME_REALVP, { .vop_realvp = smbfs_realvp }, 5014*8329232eSGordon Ross #ifdef _KERNEL 50155f4fc069Sjilinxpd 
VOPNAME_GETPAGE, { .vop_getpage = smbfs_getpage }, 50165f4fc069Sjilinxpd VOPNAME_PUTPAGE, { .vop_putpage = smbfs_putpage }, 50175f4fc069Sjilinxpd VOPNAME_MAP, { .vop_map = smbfs_map }, 50185f4fc069Sjilinxpd VOPNAME_ADDMAP, { .vop_addmap = smbfs_addmap }, 50195f4fc069Sjilinxpd VOPNAME_DELMAP, { .vop_delmap = smbfs_delmap }, 5020*8329232eSGordon Ross #endif // _KERNEL 50215f4fc069Sjilinxpd VOPNAME_PATHCONF, { .vop_pathconf = smbfs_pathconf }, 50225f4fc069Sjilinxpd VOPNAME_SETSECATTR, { .vop_setsecattr = smbfs_setsecattr }, 50235f4fc069Sjilinxpd VOPNAME_GETSECATTR, { .vop_getsecattr = smbfs_getsecattr }, 50245f4fc069Sjilinxpd VOPNAME_SHRLOCK, { .vop_shrlock = smbfs_shrlock }, 50255f4fc069Sjilinxpd #ifdef SMBFS_VNEVENT 50265f4fc069Sjilinxpd VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 50275f4fc069Sjilinxpd #endif 50285f4fc069Sjilinxpd { NULL, NULL } 50295f4fc069Sjilinxpd }; 5030
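/*
 * Editorial note (illustration, not part of the original file):
 *
 * A template such as smbfs_vnodeops_template above is only a table of
 * name/function pairs.  It becomes a usable vnodeops_t when the file
 * system initialization code passes it to vn_make_ops(); for smbfs that
 * call is made from the VFS init path, not from this file.  The sketch
 * below shows the usual pattern.  The guard macro SMBFS_VNOPS_EXAMPLE,
 * the function name and the variable name are made up for illustration;
 * vn_make_ops(), vnodeops_t and cmn_err() are the real interfaces.
 */
#ifdef SMBFS_VNOPS_EXAMPLE	/* never defined; example is not compiled */
static vnodeops_t *smbfs_example_vnodeops = NULL;

static int
smbfs_example_init_vnodeops(void)
{
	int error;

	/*
	 * Build a vnodeops_t from the template.  Operations that are
	 * not listed in the template get the generic defaults
	 * (typically fs_nosys()).
	 */
	error = vn_make_ops("smbfs", smbfs_vnodeops_template,
	    &smbfs_example_vnodeops);
	if (error != 0)
		cmn_err(CE_WARN, "smbfs: bad vnode ops template");
	return (error);
}
#endif	/* SMBFS_VNOPS_EXAMPLE */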