1*4bff34e3Sthurlow /* 2*4bff34e3Sthurlow * Copyright (c) 2000-2001 Boris Popov 3*4bff34e3Sthurlow * All rights reserved. 4*4bff34e3Sthurlow * 5*4bff34e3Sthurlow * Redistribution and use in source and binary forms, with or without 6*4bff34e3Sthurlow * modification, are permitted provided that the following conditions 7*4bff34e3Sthurlow * are met: 8*4bff34e3Sthurlow * 1. Redistributions of source code must retain the above copyright 9*4bff34e3Sthurlow * notice, this list of conditions and the following disclaimer. 10*4bff34e3Sthurlow * 2. Redistributions in binary form must reproduce the above copyright 11*4bff34e3Sthurlow * notice, this list of conditions and the following disclaimer in the 12*4bff34e3Sthurlow * documentation and/or other materials provided with the distribution. 13*4bff34e3Sthurlow * 3. All advertising materials mentioning features or use of this software 14*4bff34e3Sthurlow * must display the following acknowledgement: 15*4bff34e3Sthurlow * This product includes software developed by Boris Popov. 16*4bff34e3Sthurlow * 4. Neither the name of the author nor the names of any co-contributors 17*4bff34e3Sthurlow * may be used to endorse or promote products derived from this software 18*4bff34e3Sthurlow * without specific prior written permission. 19*4bff34e3Sthurlow * 20*4bff34e3Sthurlow * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21*4bff34e3Sthurlow * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22*4bff34e3Sthurlow * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23*4bff34e3Sthurlow * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24*4bff34e3Sthurlow * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25*4bff34e3Sthurlow * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26*4bff34e3Sthurlow * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27*4bff34e3Sthurlow * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28*4bff34e3Sthurlow * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29*4bff34e3Sthurlow * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30*4bff34e3Sthurlow * SUCH DAMAGE. 31*4bff34e3Sthurlow * 32*4bff34e3Sthurlow * $Id: smbfs_vnops.c,v 1.128.36.1 2005/05/27 02:35:28 lindak Exp $ 33*4bff34e3Sthurlow */ 34*4bff34e3Sthurlow 35*4bff34e3Sthurlow /* 36*4bff34e3Sthurlow * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 37*4bff34e3Sthurlow * Use is subject to license terms. 38*4bff34e3Sthurlow */ 39*4bff34e3Sthurlow 40*4bff34e3Sthurlow #pragma ident "%Z%%M% %I% %E% SMI" 41*4bff34e3Sthurlow 42*4bff34e3Sthurlow #include <sys/systm.h> 43*4bff34e3Sthurlow #include <sys/cred.h> 44*4bff34e3Sthurlow #include <sys/vnode.h> 45*4bff34e3Sthurlow #include <sys/vfs.h> 46*4bff34e3Sthurlow #include <sys/uio.h> 47*4bff34e3Sthurlow #include <sys/dirent.h> 48*4bff34e3Sthurlow #include <sys/errno.h> 49*4bff34e3Sthurlow #include <sys/sysmacros.h> 50*4bff34e3Sthurlow #include <sys/kmem.h> 51*4bff34e3Sthurlow #include <sys/cmn_err.h> 52*4bff34e3Sthurlow #include <sys/dnlc.h> 53*4bff34e3Sthurlow #include <sys/vfs_opreg.h> 54*4bff34e3Sthurlow #include <sys/policy.h> 55*4bff34e3Sthurlow 56*4bff34e3Sthurlow #include <netsmb/smb_osdep.h> 57*4bff34e3Sthurlow #include <netsmb/smb.h> 58*4bff34e3Sthurlow #include <netsmb/smb_conn.h> 59*4bff34e3Sthurlow #include <netsmb/smb_subr.h> 60*4bff34e3Sthurlow 61*4bff34e3Sthurlow #include <smbfs/smbfs.h> 62*4bff34e3Sthurlow #include <smbfs/smbfs_node.h> 63*4bff34e3Sthurlow 
#include <smbfs/smbfs_subr.h>

#include <fs/fs_subr.h>

/*
 * These characters are illegal in NTFS file names.
 * ref: http://support.microsoft.com/kb/147438
 */
static const char illegal_chars[] = {
	'\\',	/* back slash */
	'/',	/* slash */
	':',	/* colon */
	'*',	/* asterisk */
	'?',	/* question mark */
	'"',	/* double quote */
	'<',	/* less than sign */
	'>',	/* greater than sign */
	'|',	/* vertical bar */
	0
};

/*
 * Turning this on causes nodes to be created in the cache
 * during directory listings.  The "fast" claim is debatable,
 * and the effects on the cache can be undesirable.
 */

/*
 * Local static function defines.
 * These are helpers shared by several of the vnode ops below;
 * their definitions appear later in this file.
 */

static int	smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
			int dnlc, caller_context_t *);
static int	smbfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm,
			cred_t *cr, caller_context_t *);
static int	smbfssetattr(vnode_t *, struct vattr *, int, cred_t *);
static int	smbfs_accessx(void *, int, cred_t *);
static int	smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
			caller_context_t *);

/*
 * These are the vnode ops routines which implement the vnode interface to
 * the networked file system.  These routines just take their parameters,
 * make them look networkish by putting the right info into interface structs,
 * and then calling the appropriate remote routine(s) to do the work.
 *
 * Note on directory name lookup cacheing:  If we detect a stale fhandle,
 * we purge the directory cache relative to that vnode.  This way, the
 * user won't get burned by the cache repeatedly.  See <smbfs/smbnode.h> for
 * more details on smbnode locking.
 */

/* Forward declarations for the vnode-op entry points registered below. */
static int	smbfs_open(vnode_t **, int, cred_t *, caller_context_t *);
static int	smbfs_close(vnode_t *, int, int, offset_t, cred_t *,
			caller_context_t *);
static int	smbfs_read(vnode_t *, struct uio *, int, cred_t *,
			caller_context_t *);
static int	smbfs_write(vnode_t *, struct uio *, int, cred_t *,
			caller_context_t *);
static int	smbfs_getattr(vnode_t *, struct vattr *, int, cred_t *,
			caller_context_t *);
static int	smbfs_setattr(vnode_t *, struct vattr *, int, cred_t *,
			caller_context_t *);
static int	smbfs_access(vnode_t *, int, int, cred_t *, caller_context_t *);
static int	smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
static void	smbfs_inactive(vnode_t *, cred_t *, caller_context_t *);
static int	smbfs_lookup(vnode_t *, char *, vnode_t **, struct pathname *,
			int, vnode_t *, cred_t *, caller_context_t *,
			int *, pathname_t *);
static int	smbfs_create(vnode_t *, char *, struct vattr *, enum vcexcl,
			int, vnode_t **, cred_t *, int, caller_context_t *,
			vsecattr_t *);
static int	smbfs_remove(vnode_t *, char *, cred_t *, caller_context_t *,
			int);
static int	smbfs_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
			caller_context_t *, int);
static int	smbfs_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
			cred_t *, caller_context_t *, int, vsecattr_t *);
static int	smbfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
			caller_context_t *, int);
static int	smbfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
			caller_context_t *, int);
static int	smbfs_rwlock(vnode_t *, int, caller_context_t *);
static void	smbfs_rwunlock(vnode_t *, int, caller_context_t *);
static int	smbfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
static int	smbfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
			struct flk_callback *, cred_t *, caller_context_t *);
static int	smbfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
			cred_t *, caller_context_t *);
static int	smbfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
			caller_context_t *);
static int	smbfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
			caller_context_t *);

/*
 * Dummy function to use until correct function is ported in.
 * Always succeeds; used for ops (e.g. GETSECATTR) where returning
 * ENOSYS would break callers.
 */
int noop_vnodeop() {
	return (0);
}

/* Filled in by smbfs init code via vn_make_ops() from the template below. */
struct vnodeops *smbfs_vnodeops = NULL;

/*
 * Most unimplemented ops will return ENOSYS because of
fs_nosys().
 * The only ops where that won't work are ACCESS (due to open(2)
 * failures) and GETSECATTR (due to acl(2) failures).
 */
const fs_operation_def_t smbfs_vnodeops_template[] = {
	{ VOPNAME_OPEN,		{ .vop_open = smbfs_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = smbfs_close } },
	{ VOPNAME_READ,		{ .vop_read = smbfs_read } },
	{ VOPNAME_WRITE,	{ .vop_write = smbfs_write } },
	{ VOPNAME_IOCTL,	{ .error = fs_nosys } }, /* smbfs_ioctl, */
	{ VOPNAME_GETATTR,	{ .vop_getattr = smbfs_getattr } },
	{ VOPNAME_SETATTR,	{ .vop_setattr = smbfs_setattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = smbfs_access } },
	{ VOPNAME_LOOKUP,	{ .vop_lookup = smbfs_lookup } },
	{ VOPNAME_CREATE,	{ .vop_create = smbfs_create } },
	{ VOPNAME_REMOVE,	{ .vop_remove = smbfs_remove } },
	{ VOPNAME_LINK,		{ .error = fs_nosys } }, /* smbfs_link, */
	{ VOPNAME_RENAME,	{ .vop_rename = smbfs_rename } },
	{ VOPNAME_MKDIR,	{ .vop_mkdir = smbfs_mkdir } },
	{ VOPNAME_RMDIR,	{ .vop_rmdir = smbfs_rmdir } },
	{ VOPNAME_READDIR,	{ .vop_readdir = smbfs_readdir } },
	{ VOPNAME_SYMLINK,	{ .error = fs_nosys } }, /* smbfs_symlink, */
	{ VOPNAME_READLINK,	{ .error = fs_nosys } }, /* smbfs_readlink, */
	{ VOPNAME_FSYNC,	{ .vop_fsync = smbfs_fsync } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = smbfs_inactive } },
	{ VOPNAME_FID,		{ .error = fs_nosys } }, /* smbfs_fid, */
	{ VOPNAME_RWLOCK,	{ .vop_rwlock = smbfs_rwlock } },
	{ VOPNAME_RWUNLOCK,	{ .vop_rwunlock = smbfs_rwunlock } },
	{ VOPNAME_SEEK,		{ .vop_seek = smbfs_seek } },
	{ VOPNAME_FRLOCK,	{ .vop_frlock = smbfs_frlock } },
	{ VOPNAME_SPACE,	{ .vop_space = smbfs_space } },
	{ VOPNAME_REALVP,	{ .error = fs_nosys } }, /* smbfs_realvp, */
	{ VOPNAME_GETPAGE,	{ .error = fs_nosys } }, /* smbfs_getpage, */
	{ VOPNAME_PUTPAGE,	{ .error = fs_nosys } }, /* smbfs_putpage, */
	{ VOPNAME_MAP,		{ .error = fs_nosys } }, /* smbfs_map, */
	{ VOPNAME_ADDMAP,	{ .error = fs_nosys } }, /* smbfs_addmap, */
	{ VOPNAME_DELMAP,	{ .error = fs_nosys } }, /* smbfs_delmap, */
	{ VOPNAME_DUMP,		{ .error = fs_nosys } }, /* smbfs_dump, */
	{ VOPNAME_PATHCONF,	{ .vop_pathconf = smbfs_pathconf } },
	{ VOPNAME_PAGEIO,	{ .error = fs_nosys } }, /* smbfs_pageio, */
	{ VOPNAME_SETSECATTR,	{ .error = fs_nosys } }, /* smbfs_setsecattr, */
	{ VOPNAME_GETSECATTR,	{ .error = noop_vnodeop } },
						/* smbfs_getsecattr, */
	{ VOPNAME_SHRLOCK,	{ .vop_shrlock = smbfs_shrlock } },
	{ NULL, NULL }
};

/*
 * XXX
 * When new and relevant functionality is enabled, we should be
 * calling vfs_set_feature() to inform callers that pieces of
 * functionality are available, per PSARC 2007/227, e.g.
 *
 *	VFSFT_XVATTR		Supports xvattr for attrs
 *	VFSFT_CASEINSENSITIVE	Supports case-insensitive
 *	VFSFT_NOCASESENSITIVE	NOT case-sensitive
 *	VFSFT_DIRENTFLAGS	Supports dirent flags
 *	VFSFT_ACLONCREATE	Supports ACL on create
 *	VFSFT_ACEMASKONACCESS	Can use ACEMASK for access
 */

/*
 * smbfs_open: VOP_OPEN entry point.
 *
 * For directories, just bump n_dirrefs.  For regular files, translate
 * the open flags into SMB access rights and do a wire open, unless an
 * existing open on this node already grants sufficient rights (in which
 * case it is shared by bumping n_fidrefs).  When an upgrade open
 * succeeds, the old FID is closed on the wire.  Returns 0 or errno.
 */
/* ARGSUSED */
static int
smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	struct vattr	va;
	smbnode_t	*np;
	vnode_t		*vp;
	u_int32_t	rights, rightsrcvd;
	u_int16_t	fid, oldfid;
	struct smb_cred scred;
	smbmntinfo_t	*smi;
	cred_t		*oldcr;
	int		attrcacheupdated = 0;
	int		tmperror;
	int		error = 0;

	vp = *vpp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Cross-zone access is never allowed. */
	if (curproc->p_zone != smi->smi_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_type != VREG && vp->v_type != VDIR) { /* XXX VLNK? */
		SMBVDEBUG("open eacces vtype=%d\n", vp->v_type);
		return (EACCES);
	}

	/*
	 * Get exclusive access to n_fid and related stuff.
	 * No returns after this until out.
	 */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, curproc, cr);

	/*
	 * Directory open is easy: no wire open is done here,
	 * just account for the reference.
	 */
	if (vp->v_type == VDIR) {
		np->n_dirrefs++;
		goto have_fid;
	}

	/*
	 * If caller specified O_TRUNC/FTRUNC, then be sure to set
	 * FWRITE (to drive successful setattr(size=0) after open)
	 */
	if (flag & FTRUNC)
		flag |= FWRITE;

	/*
	 * If we already have it open, check to see if current rights
	 * are sufficient for this open.
	 */
	if (np->n_fidrefs) {
		int upgrade = 0;

		/* BEGIN CSTYLED */
		if ((flag & FWRITE) &&
		    !(np->n_rights & (SA_RIGHT_FILE_WRITE_DATA |
				GENERIC_RIGHT_ALL_ACCESS |
				GENERIC_RIGHT_WRITE_ACCESS)))
			upgrade = 1;
		if ((flag & FREAD) &&
		    !(np->n_rights & (SA_RIGHT_FILE_READ_DATA |
				GENERIC_RIGHT_ALL_ACCESS |
				GENERIC_RIGHT_READ_ACCESS)))
			upgrade = 1;
		/* END CSTYLED */
		if (!upgrade) {
			/*
			 * the existing open is good enough
			 */
			np->n_fidrefs++;
			goto have_fid;
		}
	}
	/* Upgrade opens ask for the union of old and new rights. */
	rights = np->n_fidrefs ? np->n_rights : 0;

	/*
	 * we always ask for READ_CONTROL so we can always get the
	 * owner/group IDs to satisfy a stat.
	 * XXX: verify that works with "drop boxes"
	 */
	rights |= STD_RIGHT_READ_CONTROL_ACCESS;
	if ((flag & FREAD))
		rights |= SA_RIGHT_FILE_READ_DATA;
	if ((flag & FWRITE))
		rights |= SA_RIGHT_FILE_APPEND_DATA | SA_RIGHT_FILE_WRITE_DATA;

	/* XXX: open gets the current size, but we don't use it. */
	error = smbfs_smb_open(np, rights, &scred, &attrcacheupdated, &fid,
	    NULL, 0, 0, NULL, &rightsrcvd);
	if (error)
		goto out;

	/*
	 * We have a new FID and access rights.
	 */
	oldfid = np->n_fid;
	np->n_fid = fid;
	np->n_rights = rightsrcvd;
	np->n_fidrefs++;
	if (np->n_fidrefs > 1) {
		/*
		 * We already had it open (presumably because
		 * it was open with insufficient rights.)
		 * Close old wire-open.
		 */
		tmperror = smbfs_smb_close(smi->smi_share,
		    oldfid, &np->n_mtime, &scred);
		if (tmperror)
			SMBVDEBUG("error %d closing %s\n",
			    tmperror, np->n_rpath);
	}

	/*
	 * This thread did the open.
	 * Save our credentials too.
	 */
	mutex_enter(&np->r_statelock);
	oldcr = np->r_cred;
	np->r_cred = cr;
	crhold(cr);
	if (oldcr)
		crfree(oldcr);
	mutex_exit(&np->r_statelock);

have_fid:
	/* Get attributes (maybe). */

	/* Darwin (derived) code. */

	va.va_mask = AT_MTIME;
	if (np->n_flag & NMODIFIED)
		smbfs_attr_cacheremove(np);

	/*
	 * Try to get attributes, but don't bail on error.
	 * NOTE(review): r_lkserlock was entered RW_WRITER above (the
	 * previous comment said "reader"); smbfsgetattr() will take
	 * r_lkserlock again, so the lock must support recursive entry
	 * here — confirm against smbfs_rw_enter_sig().
	 */
	tmperror = smbfsgetattr(vp, &va, cr);
	if (tmperror)
		SMBERROR("getattr failed, error=%d", tmperror);
	else
		np->n_mtime.tv_sec = va.va_mtime.tv_sec;

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);
	return (error);
}

/*
 * smbfs_close: VOP_CLOSE entry point.
 *
 * Releases record/share locks when local locking is in use, and on
 * "last close" (count <= 1) drops the directory search context or
 * wire FID.  Always returns 0; wire close errors are only logged.
 */
/*ARGSUSED*/
static int
smbfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
	caller_context_t *ct)
{
	smbnode_t	*np;
	int		error = 0;
	struct smb_cred scred;

	np = VTOSMB(vp);

	/*
	 * Don't "bail out" for VFS_UNMOUNTED here,
	 * as we want to do cleanup, etc.
	 */

	/*
	 * zone_enter(2) prevents processes from changing zones with SMBFS files
	 * open; if we happen to get here from the wrong zone we can't do
	 * anything over the wire.
	 */
	if (VTOSMI(vp)->smi_zone != curproc->p_zone) {
		/*
		 * We could attempt to clean up locks, except we're sure
		 * that the current process didn't acquire any locks on
		 * the file: any attempt to lock a file belong to another zone
		 * will fail, and one can't lock an SMBFS file and then change
		 * zones, as that fails too.
		 *
		 * Returning an error here is the sane thing to do.  A
		 * subsequent call to VN_RELE() which translates to a
		 * smbfs_inactive() will clean up state: if the zone of the
		 * vnode's origin is still alive and kicking, an async worker
		 * thread will handle the request (from the correct zone), and
		 * everything (minus the final smbfs_getattr_otw() call) should
		 * be OK.  If the zone is going away smbfs_async_inactive() will
		 * throw away cached pages inline.
		 */
		return (EIO);
	}

	/*
	 * If we are using local locking for this filesystem, then
	 * release all of the SYSV style record locks.  Otherwise,
	 * we are doing network locking and we need to release all
	 * of the network locks.  All of the locks held by this
	 * process on this file are released no matter what the
	 * incoming reference count is.
	 */
	if (VTOSMI(vp)->smi_flags & SMI_LLOCK) {
		cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
		cleanshares(vp, ttoproc(curthread)->p_pid);
	}

	/* Not the last close of this file table entry: nothing more to do. */
	if (count > 1)
		return (0);
	/*
	 * OK, do "last close" stuff.
	 */

	/*
	 * Do the CIFS close.
	 * Darwin code
	 */

	/*
	 * Exclusive lock for modifying n_fid stuff.
	 * Don't want this one ever interruptible.
	 */
	(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
	smb_credinit(&scred, curproc, cr);

	error = 0;
	if (vp->v_type == VDIR) {
		struct smbfs_fctx *fctx;
		ASSERT(np->n_dirrefs > 0);
		if (--np->n_dirrefs)
			goto out;
		if ((fctx = np->n_dirseq) != NULL) {
			np->n_dirseq = NULL;
			error = smbfs_smb_findclose(fctx, &scred);
		}
	} else {
		uint16_t ofid;
		ASSERT(np->n_fidrefs > 0);
		if (--np->n_fidrefs)
			goto out;
		if ((ofid = np->n_fid) != SMB_FID_UNUSED) {
			np->n_fid = SMB_FID_UNUSED;
			error = smbfs_smb_close(np->n_mount->smi_share,
			    ofid, NULL, &scred);
		}
	}
	if (error) {
		SMBERROR("error %d closing %s\n",
		    error, np->n_rpath);
	}

	if (np->n_flag & NATTRCHANGED)
		smbfs_attr_cacheremove(np);

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	/* don't return any errors */
	return (0);
}

/*
 * smbfs_read: VOP_READ entry point.
 *
 * Validates the request, refreshes size/mtime from the server, then
 * hands the transfer to smbfs_readvnode() while holding r_lkserlock
 * as reader (protects n_fid).  Returns 0 or errno.
 */
/* ARGSUSED */
static int
smbfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	int		error;
	struct vattr	va;
	smbmntinfo_t	*smi;
	smbnode_t	*np;
	/* u_offset_t	off; */
	/* offset_t	diff; */

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * Like NFS3, just check for 63-bit overflow.
	 * Our SMB layer takes care to return EFBIG
	 * when it has to fallback to a 32-bit call.
	 */
	if (uiop->uio_loffset < 0 ||
	    uiop->uio_loffset + uiop->uio_resid < 0)
		return (EINVAL);

	/* Shared lock for n_fid use in smbfs_readvnode */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
		return (EINTR);

	/* get vnode attributes from server */
	va.va_mask = AT_SIZE | AT_MTIME;
	if (error = smbfsgetattr(vp, &va, cr))
		goto out;

	/* should probably update mtime with mtime from server here */

	/*
	 * Darwin had a loop here that handled paging stuff.
	 * Solaris does paging differently, so no loop needed.
	 */
	error = smbfs_readvnode(vp, uiop, cr, &va);

out:
	smbfs_rw_exit(&np->r_lkserlock);
	return (error);

}


/*
 * smbfs_write: VOP_WRITE entry point.
 *
 * Validates the request and hands the transfer to smbfs_writevnode()
 * with the default write timeout, holding r_lkserlock as reader
 * (protects n_fid).  Returns 0 or errno.
 */
/* ARGSUSED */
static int
smbfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	int		error;
	smbmntinfo_t	*smi;
	smbnode_t	*np;
	int		timo = SMBWRTTIMO;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/* Shared lock for n_fid use in smbfs_writevnode */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
		return (EINTR);


	/*
	 * Darwin had a loop here that handled paging stuff.
	 * Solaris does paging differently, so no loop needed.
	 */
	error = smbfs_writevnode(vp, uiop, cr, ioflag, timo);

	smbfs_rw_exit(&np->r_lkserlock);
	return (error);

}


/*
 * Return either cached or remote attributes. If get remote attr
 * use them to check and invalidate caches, then cache the new attributes.
 *
 * XXX
 * This op should eventually support PSARC 2007/315, Extensible Attribute
 * Interfaces, for richer metadata.
604*4bff34e3Sthurlow */ 605*4bff34e3Sthurlow /* ARGSUSED */ 606*4bff34e3Sthurlow static int 607*4bff34e3Sthurlow smbfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr, 608*4bff34e3Sthurlow caller_context_t *ct) 609*4bff34e3Sthurlow { 610*4bff34e3Sthurlow smbnode_t *np; 611*4bff34e3Sthurlow smbmntinfo_t *smi; 612*4bff34e3Sthurlow 613*4bff34e3Sthurlow smi = VTOSMI(vp); 614*4bff34e3Sthurlow 615*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 616*4bff34e3Sthurlow return (EIO); 617*4bff34e3Sthurlow 618*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 619*4bff34e3Sthurlow return (EIO); 620*4bff34e3Sthurlow 621*4bff34e3Sthurlow /* 622*4bff34e3Sthurlow * If it has been specified that the return value will 623*4bff34e3Sthurlow * just be used as a hint, and we are only being asked 624*4bff34e3Sthurlow * for size, fsid or rdevid, then return the client's 625*4bff34e3Sthurlow * notion of these values without checking to make sure 626*4bff34e3Sthurlow * that the attribute cache is up to date. 627*4bff34e3Sthurlow * The whole point is to avoid an over the wire GETATTR 628*4bff34e3Sthurlow * call. 
629*4bff34e3Sthurlow */ 630*4bff34e3Sthurlow np = VTOSMB(vp); 631*4bff34e3Sthurlow if (flags & ATTR_HINT) { 632*4bff34e3Sthurlow if (vap->va_mask == 633*4bff34e3Sthurlow (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) { 634*4bff34e3Sthurlow mutex_enter(&np->r_statelock); 635*4bff34e3Sthurlow if (vap->va_mask | AT_SIZE) 636*4bff34e3Sthurlow vap->va_size = np->r_size; 637*4bff34e3Sthurlow if (vap->va_mask | AT_FSID) 638*4bff34e3Sthurlow vap->va_fsid = np->r_attr.va_fsid; 639*4bff34e3Sthurlow if (vap->va_mask | AT_RDEV) 640*4bff34e3Sthurlow vap->va_rdev = np->r_attr.va_rdev; 641*4bff34e3Sthurlow mutex_exit(&np->r_statelock); 642*4bff34e3Sthurlow return (0); 643*4bff34e3Sthurlow } 644*4bff34e3Sthurlow } 645*4bff34e3Sthurlow 646*4bff34e3Sthurlow 647*4bff34e3Sthurlow return (smbfsgetattr(vp, vap, cr)); 648*4bff34e3Sthurlow } 649*4bff34e3Sthurlow 650*4bff34e3Sthurlow /* 651*4bff34e3Sthurlow * Mostly from Darwin smbfs_getattr() 652*4bff34e3Sthurlow */ 653*4bff34e3Sthurlow int 654*4bff34e3Sthurlow smbfsgetattr(vnode_t *vp, struct vattr *vap, cred_t *cr) 655*4bff34e3Sthurlow { 656*4bff34e3Sthurlow int error; 657*4bff34e3Sthurlow smbnode_t *np; 658*4bff34e3Sthurlow struct smb_cred scred; 659*4bff34e3Sthurlow struct smbfattr fattr; 660*4bff34e3Sthurlow 661*4bff34e3Sthurlow ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone); 662*4bff34e3Sthurlow 663*4bff34e3Sthurlow np = VTOSMB(vp); 664*4bff34e3Sthurlow 665*4bff34e3Sthurlow /* 666*4bff34e3Sthurlow * If we've got cached attributes, we're done, otherwise go 667*4bff34e3Sthurlow * to the server to get attributes, which will update the cache 668*4bff34e3Sthurlow * in the process. 669*4bff34e3Sthurlow * 670*4bff34e3Sthurlow * This section from Darwin smbfs_getattr, 671*4bff34e3Sthurlow * but then modified a lot. 
672*4bff34e3Sthurlow */ 673*4bff34e3Sthurlow error = smbfs_attr_cachelookup(vp, vap); 674*4bff34e3Sthurlow if (error != ENOENT) 675*4bff34e3Sthurlow return (error); 676*4bff34e3Sthurlow 677*4bff34e3Sthurlow /* Shared lock for (possible) n_fid use. */ 678*4bff34e3Sthurlow if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp))) 679*4bff34e3Sthurlow return (EINTR); 680*4bff34e3Sthurlow smb_credinit(&scred, curproc, cr); 681*4bff34e3Sthurlow 682*4bff34e3Sthurlow error = smbfs_smb_getfattr(np, &fattr, &scred); 683*4bff34e3Sthurlow 684*4bff34e3Sthurlow smb_credrele(&scred); 685*4bff34e3Sthurlow smbfs_rw_exit(&np->r_lkserlock); 686*4bff34e3Sthurlow 687*4bff34e3Sthurlow if (!error) { 688*4bff34e3Sthurlow smbfs_attr_cacheenter(vp, &fattr); 689*4bff34e3Sthurlow error = smbfs_attr_cachelookup(vp, vap); 690*4bff34e3Sthurlow } 691*4bff34e3Sthurlow return (error); 692*4bff34e3Sthurlow } 693*4bff34e3Sthurlow 694*4bff34e3Sthurlow /* 695*4bff34e3Sthurlow * XXX 696*4bff34e3Sthurlow * This op should eventually support PSARC 2007/315, Extensible Attribute 697*4bff34e3Sthurlow * Interfaces, for richer metadata. 
698*4bff34e3Sthurlow */ 699*4bff34e3Sthurlow /*ARGSUSED4*/ 700*4bff34e3Sthurlow static int 701*4bff34e3Sthurlow smbfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr, 702*4bff34e3Sthurlow caller_context_t *ct) 703*4bff34e3Sthurlow { 704*4bff34e3Sthurlow int error; 705*4bff34e3Sthurlow uint_t mask; 706*4bff34e3Sthurlow struct vattr oldva; 707*4bff34e3Sthurlow smbmntinfo_t *smi; 708*4bff34e3Sthurlow 709*4bff34e3Sthurlow smi = VTOSMI(vp); 710*4bff34e3Sthurlow 711*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 712*4bff34e3Sthurlow return (EIO); 713*4bff34e3Sthurlow 714*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 715*4bff34e3Sthurlow return (EIO); 716*4bff34e3Sthurlow 717*4bff34e3Sthurlow mask = vap->va_mask; 718*4bff34e3Sthurlow if (mask & AT_NOSET) 719*4bff34e3Sthurlow return (EINVAL); 720*4bff34e3Sthurlow 721*4bff34e3Sthurlow oldva.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID; 722*4bff34e3Sthurlow error = smbfsgetattr(vp, &oldva, cr); 723*4bff34e3Sthurlow if (error) 724*4bff34e3Sthurlow return (error); 725*4bff34e3Sthurlow 726*4bff34e3Sthurlow error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags, 727*4bff34e3Sthurlow smbfs_accessx, vp); 728*4bff34e3Sthurlow if (error) 729*4bff34e3Sthurlow return (error); 730*4bff34e3Sthurlow 731*4bff34e3Sthurlow return (smbfssetattr(vp, vap, flags, cr)); 732*4bff34e3Sthurlow } 733*4bff34e3Sthurlow 734*4bff34e3Sthurlow /* 735*4bff34e3Sthurlow * Mostly from Darwin smbfs_setattr() 736*4bff34e3Sthurlow * but then modified a lot. 
 */
/*
 * Do the actual over-the-wire work for setattr: apply the
 * requested size / mtime / atime changes.  Callers (smbfs_setattr,
 * smbfs_create truncation) have already done zone and policy checks.
 * Returns 0 or an errno value.
 */
/* ARGSUSED */
static int
smbfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
{
	int error = 0;
	smbnode_t *np = VTOSMB(vp);
	smbmntinfo_t *smi = VTOSMI(vp);
	uint_t mask = vap->va_mask;
	struct timespec *mtime, *atime;
	struct smb_cred scred;
	int cerror, modified = 0;
	unsigned short fid;		/* temp open handle, valid iff have_fid */
	int have_fid = 0;
	uint32_t rights = 0;		/* NT access rights for the temp open */

	ASSERT(curproc->p_zone == smi->smi_zone);

	/*
	 * If our caller is trying to set multiple attributes, they
	 * can make no assumption about what order they are done in.
	 * Here we try to do them in order of decreasing likelihood
	 * of failure, just to minimize the chance we'll wind up
	 * with a partially complete request.
	 */

	/* Shared lock for (possible) n_fid use. */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, curproc, cr);

	/*
	 * Will we need an open handle for this setattr?
	 * If so, what rights will we need?
	 */
	if (mask & (AT_ATIME | AT_MTIME)) {
		/*
		 * NOTE(review): requesting GENERIC_RIGHT_ALL_ACCESS in
		 * addition to WRITE_ATTRIBUTES looks broader than needed;
		 * confirm against the server's access-check behavior.
		 */
		rights |=
		    SA_RIGHT_FILE_WRITE_ATTRIBUTES |
		    GENERIC_RIGHT_ALL_ACCESS |
		    GENERIC_RIGHT_WRITE_ACCESS;
	}
	if (mask & AT_SIZE) {
		rights |=
		    SA_RIGHT_FILE_WRITE_DATA |
		    SA_RIGHT_FILE_APPEND_DATA;
		/*
		 * Only SIZE requires a handle.
		 * XXX May be more reliable to just
		 * always get the file handle here.
		 */
		error = smbfs_smb_tmpopen(np, rights, &scred, &fid);
		if (error) {
			SMBVDEBUG("error %d opening %s\n",
			    error, np->n_rpath);
			goto out;
		}
		have_fid = 1;
	}


	/*
	 * If the server supports the UNIX extensions, right here is where
	 * we'd support changes to uid, gid, mode, and possibly va_flags.
	 * For now we claim to have made any such changes.
	 */

	if (mask & AT_SIZE) {
		/*
		 * If the new file size is less than what the client sees as
		 * the file size, then just change the size and invalidate
		 * the pages.
		 * I am commenting this code at present because the function
		 * smbfs_putapage() is not yet implemented.
		 */

		/*
		 * Set the file size to vap->va_size.
		 */
		ASSERT(have_fid);
		error = smbfs_smb_setfsize(np, fid, vap->va_size, &scred);
		if (error) {
			SMBVDEBUG("setsize error %d file %s\n",
			    error, np->n_rpath);
		} else {
			/*
			 * Darwin had code here to zero-extend.
			 * Tests indicate the server will zero-fill,
			 * so looks like we don't need to do this.
			 * Good thing, as this could take forever.
			 */
			/* Keep the client's notion of the size in sync. */
			mutex_enter(&np->r_statelock);
			np->r_size = vap->va_size;
			mutex_exit(&np->r_statelock);
			modified = 1;
		}
	}

	/*
	 * XXX: When Solaris has create_time, set that too.
	 * Note: create_time is different from ctime.
	 */
	mtime = ((mask & AT_MTIME) ? &vap->va_mtime : 0);
	atime = ((mask & AT_ATIME) ? &vap->va_atime : 0);

	if (mtime || atime) {
		/*
		 * If file is opened with write-attributes capability,
		 * we use handle-based calls. If not, we use path-based ones.
		 */
		if (have_fid) {
			error = smbfs_smb_setfattr(np, fid,
			    np->n_dosattr, mtime, atime, &scred);
		} else {
			error = smbfs_smb_setpattr(np,
			    np->n_dosattr, mtime, atime, &scred);
		}
		if (error) {
			SMBVDEBUG("set times error %d file %s\n",
			    error, np->n_rpath);
		} else {
			/* XXX: set np->n_mtime, etc? */
			modified = 1;
		}
	}

out:
	if (modified) {
		/*
		 * Invalidate attribute cache in case if server doesn't set
		 * required attributes.
		 */
		smbfs_attr_cacheremove(np);
		/*
		 * XXX Darwin called _getattr here to
		 * update the mtime. Should we?
		 */
	}

	/* Balance the smbfs_smb_tmpopen above; failure is only logged. */
	if (have_fid) {
		cerror = smbfs_smb_tmpclose(np, fid, &scred);
		if (cerror)
			SMBERROR("error %d closing %s\n",
			    cerror, np->n_rpath);
	}

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	return (error);
}

/*
 * smbfs_access_rwx()
 * Common function for smbfs_access, etc.
 *
 * The security model implemented by the FS is unusual
 * due to our "single user mounts" restriction.
 *
 * All access under a given mount point uses the CIFS
 * credentials established by the owner of the mount.
 * The Unix uid/gid/mode information is not (easily)
 * provided by CIFS, and is instead fabricated using
 * settings held in the mount structure.
900*4bff34e3Sthurlow * 901*4bff34e3Sthurlow * Most access checking is handled by the CIFS server, 902*4bff34e3Sthurlow * but we need sufficient Unix access checks here to 903*4bff34e3Sthurlow * prevent other local Unix users from having access 904*4bff34e3Sthurlow * to objects under this mount that the uid/gid/mode 905*4bff34e3Sthurlow * settings in the mount would not allow. 906*4bff34e3Sthurlow * 907*4bff34e3Sthurlow * With this model, there is a case where we need the 908*4bff34e3Sthurlow * ability to do an access check before we have the 909*4bff34e3Sthurlow * vnode for an object. This function takes advantage 910*4bff34e3Sthurlow * of the fact that the uid/gid/mode is per mount, and 911*4bff34e3Sthurlow * avoids the need for a vnode. 912*4bff34e3Sthurlow * 913*4bff34e3Sthurlow * We still (sort of) need a vnode when we call 914*4bff34e3Sthurlow * secpolicy_vnode_access, but that only uses 915*4bff34e3Sthurlow * the vtype field, so we can use a pair of fake 916*4bff34e3Sthurlow * vnodes that have only v_type filled in. 917*4bff34e3Sthurlow * 918*4bff34e3Sthurlow * XXX: Later, add a new secpolicy_vtype_access() 919*4bff34e3Sthurlow * that takes the vtype instead of a vnode, and 920*4bff34e3Sthurlow * get rid of the tmpl_vxxx fake vnodes below. 921*4bff34e3Sthurlow */ 922*4bff34e3Sthurlow static int 923*4bff34e3Sthurlow smbfs_access_rwx(vfs_t *vfsp, int vtype, int mode, cred_t *cr) 924*4bff34e3Sthurlow { 925*4bff34e3Sthurlow /* See the secpolicy call below. */ 926*4bff34e3Sthurlow static const vnode_t tmpl_vdir = { .v_type = VDIR }; 927*4bff34e3Sthurlow static const vnode_t tmpl_vreg = { .v_type = VREG }; 928*4bff34e3Sthurlow vattr_t va; 929*4bff34e3Sthurlow vnode_t *tvp; 930*4bff34e3Sthurlow struct smbmntinfo *smi = VFTOSMI(vfsp); 931*4bff34e3Sthurlow int shift = 0; 932*4bff34e3Sthurlow 933*4bff34e3Sthurlow /* 934*4bff34e3Sthurlow * Build our (fabricated) vnode attributes. 
935*4bff34e3Sthurlow * XXX: Could make these templates in the 936*4bff34e3Sthurlow * per-mount struct and use them here. 937*4bff34e3Sthurlow */ 938*4bff34e3Sthurlow bzero(&va, sizeof (va)); 939*4bff34e3Sthurlow va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID; 940*4bff34e3Sthurlow va.va_type = vtype; 941*4bff34e3Sthurlow va.va_mode = (vtype == VDIR) ? 942*4bff34e3Sthurlow smi->smi_args.dir_mode : 943*4bff34e3Sthurlow smi->smi_args.file_mode; 944*4bff34e3Sthurlow va.va_uid = smi->smi_args.uid; 945*4bff34e3Sthurlow va.va_gid = smi->smi_args.gid; 946*4bff34e3Sthurlow 947*4bff34e3Sthurlow /* 948*4bff34e3Sthurlow * Disallow write attempts on read-only file systems, 949*4bff34e3Sthurlow * unless the file is a device or fifo node. Note: 950*4bff34e3Sthurlow * Inline vn_is_readonly and IS_DEVVP here because 951*4bff34e3Sthurlow * we may not have a vnode ptr. Original expr. was: 952*4bff34e3Sthurlow * (mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp)) 953*4bff34e3Sthurlow */ 954*4bff34e3Sthurlow if ((mode & VWRITE) && 955*4bff34e3Sthurlow (vfsp->vfs_flag & VFS_RDONLY) && 956*4bff34e3Sthurlow !(vtype == VCHR || vtype == VBLK || vtype == VFIFO)) 957*4bff34e3Sthurlow return (EROFS); 958*4bff34e3Sthurlow 959*4bff34e3Sthurlow /* 960*4bff34e3Sthurlow * Disallow attempts to access mandatory lock files. 961*4bff34e3Sthurlow * Similarly, expand MANDLOCK here. 962*4bff34e3Sthurlow * XXX: not sure we need this. 963*4bff34e3Sthurlow */ 964*4bff34e3Sthurlow if ((mode & (VWRITE | VREAD | VEXEC)) && 965*4bff34e3Sthurlow va.va_type == VREG && MANDMODE(va.va_mode)) 966*4bff34e3Sthurlow return (EACCES); 967*4bff34e3Sthurlow 968*4bff34e3Sthurlow /* 969*4bff34e3Sthurlow * Access check is based on only 970*4bff34e3Sthurlow * one of owner, group, public. 971*4bff34e3Sthurlow * If not owner, then check group. 972*4bff34e3Sthurlow * If not a member of the group, 973*4bff34e3Sthurlow * then check public access. 
974*4bff34e3Sthurlow */ 975*4bff34e3Sthurlow if (crgetuid(cr) != va.va_uid) { 976*4bff34e3Sthurlow shift += 3; 977*4bff34e3Sthurlow if (!groupmember(va.va_gid, cr)) 978*4bff34e3Sthurlow shift += 3; 979*4bff34e3Sthurlow } 980*4bff34e3Sthurlow mode &= ~(va.va_mode << shift); 981*4bff34e3Sthurlow if (mode == 0) 982*4bff34e3Sthurlow return (0); 983*4bff34e3Sthurlow 984*4bff34e3Sthurlow /* 985*4bff34e3Sthurlow * We need a vnode for secpolicy_vnode_access, 986*4bff34e3Sthurlow * but the only thing it looks at is v_type, 987*4bff34e3Sthurlow * so pass one of the templates above. 988*4bff34e3Sthurlow */ 989*4bff34e3Sthurlow tvp = (va.va_type == VDIR) ? 990*4bff34e3Sthurlow (vnode_t *)&tmpl_vdir : 991*4bff34e3Sthurlow (vnode_t *)&tmpl_vreg; 992*4bff34e3Sthurlow return (secpolicy_vnode_access(cr, tvp, va.va_uid, mode)); 993*4bff34e3Sthurlow } 994*4bff34e3Sthurlow 995*4bff34e3Sthurlow /* 996*4bff34e3Sthurlow * See smbfs_setattr 997*4bff34e3Sthurlow */ 998*4bff34e3Sthurlow static int 999*4bff34e3Sthurlow smbfs_accessx(void *arg, int mode, cred_t *cr) 1000*4bff34e3Sthurlow { 1001*4bff34e3Sthurlow vnode_t *vp = arg; 1002*4bff34e3Sthurlow /* 1003*4bff34e3Sthurlow * Note: The caller has checked the current zone, 1004*4bff34e3Sthurlow * the SMI_DEAD and VFS_UNMOUNTED flags, etc. 
1005*4bff34e3Sthurlow */ 1006*4bff34e3Sthurlow return (smbfs_access_rwx(vp->v_vfsp, vp->v_type, mode, cr)); 1007*4bff34e3Sthurlow } 1008*4bff34e3Sthurlow 1009*4bff34e3Sthurlow /* 1010*4bff34e3Sthurlow * XXX 1011*4bff34e3Sthurlow * This op should support PSARC 2007/403, Modified Access Checks for CIFS 1012*4bff34e3Sthurlow */ 1013*4bff34e3Sthurlow /* ARGSUSED */ 1014*4bff34e3Sthurlow static int 1015*4bff34e3Sthurlow smbfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct) 1016*4bff34e3Sthurlow { 1017*4bff34e3Sthurlow vfs_t *vfsp; 1018*4bff34e3Sthurlow smbmntinfo_t *smi; 1019*4bff34e3Sthurlow 1020*4bff34e3Sthurlow vfsp = vp->v_vfsp; 1021*4bff34e3Sthurlow smi = VFTOSMI(vfsp); 1022*4bff34e3Sthurlow 1023*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 1024*4bff34e3Sthurlow return (EIO); 1025*4bff34e3Sthurlow 1026*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED) 1027*4bff34e3Sthurlow return (EIO); 1028*4bff34e3Sthurlow 1029*4bff34e3Sthurlow return (smbfs_access_rwx(vfsp, vp->v_type, mode, cr)); 1030*4bff34e3Sthurlow } 1031*4bff34e3Sthurlow 1032*4bff34e3Sthurlow 1033*4bff34e3Sthurlow /* 1034*4bff34e3Sthurlow * Flush local dirty pages to stable storage on the server. 1035*4bff34e3Sthurlow * 1036*4bff34e3Sthurlow * If FNODSYNC is specified, then there is nothing to do because 1037*4bff34e3Sthurlow * metadata changes are not cached on the client before being 1038*4bff34e3Sthurlow * sent to the server. 1039*4bff34e3Sthurlow * 1040*4bff34e3Sthurlow * Currently, this is a no-op since we don't cache data, either. 
1041*4bff34e3Sthurlow */ 1042*4bff34e3Sthurlow /* ARGSUSED */ 1043*4bff34e3Sthurlow static int 1044*4bff34e3Sthurlow smbfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) 1045*4bff34e3Sthurlow { 1046*4bff34e3Sthurlow int error = 0; 1047*4bff34e3Sthurlow smbmntinfo_t *smi; 1048*4bff34e3Sthurlow 1049*4bff34e3Sthurlow smi = VTOSMI(vp); 1050*4bff34e3Sthurlow 1051*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 1052*4bff34e3Sthurlow return (EIO); 1053*4bff34e3Sthurlow 1054*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 1055*4bff34e3Sthurlow return (EIO); 1056*4bff34e3Sthurlow 1057*4bff34e3Sthurlow if ((syncflag & FNODSYNC) || IS_SWAPVP(vp)) 1058*4bff34e3Sthurlow return (0); 1059*4bff34e3Sthurlow 1060*4bff34e3Sthurlow return (error); 1061*4bff34e3Sthurlow } 1062*4bff34e3Sthurlow 1063*4bff34e3Sthurlow /* 1064*4bff34e3Sthurlow * Last reference to vnode went away. 1065*4bff34e3Sthurlow */ 1066*4bff34e3Sthurlow /* ARGSUSED */ 1067*4bff34e3Sthurlow static void 1068*4bff34e3Sthurlow smbfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) 1069*4bff34e3Sthurlow { 1070*4bff34e3Sthurlow smbnode_t *np; 1071*4bff34e3Sthurlow 1072*4bff34e3Sthurlow /* 1073*4bff34e3Sthurlow * Don't "bail out" for VFS_UNMOUNTED here, 1074*4bff34e3Sthurlow * as we want to do cleanup, etc. 1075*4bff34e3Sthurlow * See also pcfs_inactive 1076*4bff34e3Sthurlow */ 1077*4bff34e3Sthurlow 1078*4bff34e3Sthurlow np = VTOSMB(vp); 1079*4bff34e3Sthurlow 1080*4bff34e3Sthurlow /* 1081*4bff34e3Sthurlow * If this is coming from the wrong zone, we let someone in the right 1082*4bff34e3Sthurlow * zone take care of it asynchronously. We can get here due to 1083*4bff34e3Sthurlow * VN_RELE() being called from pageout() or fsflush(). This call may 1084*4bff34e3Sthurlow * potentially turn into an expensive no-op if, for instance, v_count 1085*4bff34e3Sthurlow * gets incremented in the meantime, but it's still correct. 
1086*4bff34e3Sthurlow */ 1087*4bff34e3Sthurlow 1088*4bff34e3Sthurlow /* 1089*4bff34e3Sthurlow * Some paranoia from the Darwin code: 1090*4bff34e3Sthurlow * Make sure the FID was closed. 1091*4bff34e3Sthurlow * If we see this, it's a bug! 1092*4bff34e3Sthurlow * 1093*4bff34e3Sthurlow * No rw_enter here, as this should be the 1094*4bff34e3Sthurlow * last ref, and we're just looking... 1095*4bff34e3Sthurlow */ 1096*4bff34e3Sthurlow if (np->n_fidrefs > 0) { 1097*4bff34e3Sthurlow SMBVDEBUG("opencount %d fid %d file %s\n", 1098*4bff34e3Sthurlow np->n_fidrefs, np->n_fid, np->n_rpath); 1099*4bff34e3Sthurlow } 1100*4bff34e3Sthurlow if (np->n_dirrefs > 0) { 1101*4bff34e3Sthurlow uint_t fid = (np->n_dirseq) ? 1102*4bff34e3Sthurlow np->n_dirseq->f_Sid : 0; 1103*4bff34e3Sthurlow SMBVDEBUG("opencount %d fid %d dir %s\n", 1104*4bff34e3Sthurlow np->n_dirrefs, fid, np->n_rpath); 1105*4bff34e3Sthurlow } 1106*4bff34e3Sthurlow 1107*4bff34e3Sthurlow smb_addfree(np); 1108*4bff34e3Sthurlow } 1109*4bff34e3Sthurlow 1110*4bff34e3Sthurlow /* 1111*4bff34e3Sthurlow * Remote file system operations having to do with directory manipulation. 
 */
/*
 * VOP_LOOKUP: validate the mount, take the directory rwlock
 * as reader, and let smbfslookup() do the real work.
 */
/* ARGSUSED */
static int
smbfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
	int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
	int *direntflags, pathname_t *realpnp)
{
	int error;
	smbnode_t *dnp;
	smbmntinfo_t *smi;

	smi = VTOSMI(dvp);

	if (curproc->p_zone != smi->smi_zone)
		return (EPERM);

	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	dnp = VTOSMB(dvp);
	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_READER, SMBINTR(dvp))) {
		error = EINTR;
		goto out;
	}

	error = smbfslookup(dvp, nm, vpp, cr, 1, ct);

	smbfs_rw_exit(&dnp->r_rwlock);

out:
	return (error);
}

/*
 * Look up name "nm" in directory dvp; on success return a held
 * vnode in *vpp.  Handles "", ".", and ".." locally; anything
 * else goes over the wire.  Caller holds dnp->r_rwlock.
 * The "dnlc" flag is reserved for future name-cache use.
 */
/* ARGSUSED */
static int
smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr, int dnlc,
	caller_context_t *ct)
{
	int error;
	int supplen; /* supported length */
	vnode_t *vp;
	smbnode_t *dnp;
	smbmntinfo_t *smi;
	/* struct smb_vc *vcp; */
	const char *name = (const char *)nm;
	int nmlen = strlen(nm);
	int rplen;
	struct smb_cred scred;
	struct smbfattr fa;

	smi = VTOSMI(dvp);
	dnp = VTOSMB(dvp);

	ASSERT(curproc->p_zone == smi->smi_zone);

#ifdef NOT_YET
	vcp = SSTOVC(smi->smi_share);

	/* XXX: Should compute this once and store it in smbmntinfo_t */
	supplen = (SMB_DIALECT(vcp) >= SMB_DIALECT_LANMAN2_0) ? 255 : 12;
#else
	/* Until dialect checking works, assume long names are OK. */
	supplen = 255;
#endif

	/*
	 * RWlock must be held, either reader or writer.
	 * XXX: Can we check without looking directly
	 * inside the struct smbfs_rwlock_t?
	 */
	ASSERT(dnp->r_rwlock.count != 0);

	/*
	 * If lookup is for "", just return dvp.  Don't need
	 * to send it over the wire, look it up in the dnlc,
	 * or perform any access checks.
	 */
	if (nmlen == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/* if the name is longer that what is supported, return an error */
	if (nmlen > supplen)
		return (ENAMETOOLONG);

	/*
	 * Avoid surprises with characters that are
	 * illegal in Windows file names.
	 * Todo: CATIA mappings XXX
	 */
	if (strpbrk(nm, illegal_chars))
		return (EINVAL);

	/* if the dvp is not a directory, return an error */
	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/* Need search permission in the directory. */
	error = smbfs_access(dvp, VEXEC, 0, cr, ct);
	if (error)
		return (error);

	/*
	 * If lookup is for ".", just return dvp.  Don't need
	 * to send it over the wire or look it up in the dnlc,
	 * just need to check access (done above).
	 */
	if (nmlen == 1 && name[0] == '.') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

#ifdef NOT_YET
	if (dnlc) {
		/*
		 * NOTE: search the dnlc here
		 */
	}
#endif

	/*
	 * Handle lookup of ".." which is quite tricky,
	 * because the protocol gives us little help.
	 *
	 * We keep full pathnames (as seen on the server)
	 * so we can just trim off the last component to
	 * get the full pathname of the parent.  Note:
	 * We don't actually copy and modify, but just
	 * compute the trimmed length and pass that with
	 * the current dir path (not null terminated).
	 *
	 * We don't go over-the-wire to get attributes
	 * for ".." because we know it's a directory,
	 * and we can just leave the rest "stale"
	 * until someone does a getattr.
	 */
	if (nmlen == 2 && name[0] == '.' && name[1] == '.') {
		if (dvp->v_flag & VROOT) {
			/*
			 * Already at the root.  This can happen
			 * with directory listings at the root,
			 * which lookup "." and ".." to get the
			 * inode numbers.  Let ".." be the same
			 * as "." in the FS root.
			 */
			VN_HOLD(dvp);
			*vpp = dvp;
			return (0);
		}

		/*
		 * Find the parent path length: scan backwards
		 * to the last '\\' separator in n_rpath.
		 */
		rplen = dnp->n_rplen;
		ASSERT(rplen > 0);
		while (--rplen >= 0) {
			if (dnp->n_rpath[rplen] == '\\')
				break;
		}
		if (rplen == 0) {
			/* Found our way to the root. */
			vp = SMBTOV(smi->smi_root);
			VN_HOLD(vp);
			*vpp = vp;
			return (0);
		}
		/* Parent node keyed by the trimmed path; no attrs yet. */
		vp = smbfs_make_node(dvp->v_vfsp,
		    dnp->n_rpath, rplen,
		    NULL, 0, NULL);
		if (vp == NULL) {
			return (ENOENT);
		}
		vp->v_type = VDIR;

		/* Success! */
		*vpp = vp;
		return (0);
	}

	/*
	 * Normal lookup of a child node.
	 * Note we handled "." and ".." above.
	 *
	 * First, go over-the-wire to get the
	 * node type (and attributes).
	 */
	smb_credinit(&scred, curproc, cr);
	/* Note: this can allocate a new "name" */
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fa, &scred);
	smb_credrele(&scred);
	if (error)
		goto out;

	/*
	 * Find or create the node.
	 */
	error = smbfs_nget(dvp, name, nmlen, &fa, &vp);
	if (error)
		goto out;

	/* Success! */
	*vpp = vp;

out:
	/* smbfs_smb_lookup may have allocated name. */
	if (name != nm)
		smbfs_name_free(name, nmlen);

	return (error);
}

/*
 * XXX
 * vsecattr_t is new to build 77, and we need to eventually support
 * it in order to create an ACL when an object is created.
 *
 * This op should support the new FIGNORECASE flag for case-insensitive
 * lookups, per PSARC 2007/244.
 */
/*
 * Create a regular file (VOP_CREATE for smbfs).
 *
 * dvp       - directory in which to create
 * nm        - name to create ("" means "use dvp itself")
 * va        - requested attributes (copied; caller's copy not modified)
 * exclusive - EXCL means fail with EEXIST if the name exists
 * mode      - access mode the caller wants on the result
 * vpp       - out: held vnode for the created/opened file
 * lfaware   - FOFFMAX set if caller handles large (>2GB) offsets
 *
 * Returns 0 with *vpp held on success, errno otherwise.
 * Serializes on dnp->r_rwlock (writer) for the whole operation.
 */
/* ARGSUSED */
static int
smbfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
	int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
	vsecattr_t *vsecp)
{
	int		error;
	int		cerror;
	vfs_t		*vfsp;
	vnode_t		*vp;
#ifdef NOT_YET
	smbnode_t	*np;
#endif
	smbnode_t	*dnp;
	smbmntinfo_t	*smi;
	struct vattr	vattr;
	struct smbfattr	fattr;
	struct smb_cred	scred;
	const char	*name = (const char *)nm;
	int		nmlen = strlen(nm);
	uint32_t	disp;
	uint16_t	fid;

	vfsp = dvp->v_vfsp;
	smi = VFTOSMI(vfsp);
	dnp = VTOSMB(dvp);
	vp = NULL;

	/* Cross-zone access is not permitted. */
	if (curproc->p_zone != smi->smi_zone)
		return (EPERM);

	/* Fail fast on a dead or unmounted file system. */
	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/*
	 * Note: this may break mknod(2) calls to create a directory,
	 * but that's obscure use.  Some other filesystems do this.
	 * XXX: Later, redirect VDIR type here to _mkdir.
	 */
	if (va->va_type != VREG)
		return (EINVAL);

	/*
	 * If the pathname is "", just use dvp, no checks.
	 * Do this outside of the rwlock (like zfs).
	 */
	if (nmlen == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/* Don't allow "." or ".." through here. */
	if ((nmlen == 1 && name[0] == '.') ||
	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
		return (EISDIR);

	/*
	 * We make a copy of the attributes because the caller does not
	 * expect us to change what va points to.
	 */
	vattr = *va;

	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, curproc, cr);

	/*
	 * XXX: Do we need r_lkserlock too?
	 * No use of any shared fid or fctx...
	 */

	/*
	 * NFS needs to go over the wire, just to be sure whether the
	 * file exists or not.  Using the DNLC can be dangerous in
	 * this case when making a decision regarding existence.
	 *
	 * The SMB protocol does NOT really need to go OTW here
	 * thanks to the expressive NTCREATE disposition values.
	 * Unfortunately, to do Unix access checks correctly,
	 * we need to know if the object already exists.
	 * When the object does not exist, we need VWRITE on
	 * the directory.  Note: smbfslookup() checks VEXEC.
	 */
	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
	if (error == 0) {
		/*
		 * The file already exists.  Exclusive create
		 * must fail; otherwise check access, truncate
		 * if requested, and return the existing node.
		 */
		if (exclusive == EXCL) {
			error = EEXIST;
			goto out;
		}
		/*
		 * Verify requested access.
		 */
		error = smbfs_access(vp, mode, 0, cr, ct);
		if (error)
			goto out;

		/*
		 * Truncate (if requested).
		 */
		if ((vattr.va_mask & AT_SIZE) && vattr.va_size == 0) {
			vattr.va_mask = AT_SIZE;
			error = smbfssetattr(vp, &vattr, 0, cr);
			if (error)
				goto out;
		}
		/* Success! */
#ifdef NOT_YET
		vnevent_create(vp, ct);
#endif
		*vpp = vp;
		goto out;
	}

	/*
	 * The file did not exist.  Need VWRITE in the directory.
	 */
	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
	if (error)
		goto out;

	/*
	 * Now things get tricky.  We also need to check the
	 * requested open mode against the file we may create.
	 * See comments at smbfs_access_rwx
	 */
	error = smbfs_access_rwx(vfsp, VREG, mode, cr);
	if (error)
		goto out;

#ifdef NOT_YET
	/* remove the entry from the negative entry from the dnlc */
	dnlc_remove(dvp, name);
#endif

	/*
	 * Now the code derived from Darwin,
	 * but with greater use of NT_CREATE
	 * disposition options.  Much changed.
	 *
	 * Create (or open) a new child node.
	 * Note we handled "." and ".." above.
	 *
	 * Map the create semantics onto an NTCREATE disposition:
	 * EXCL -> must-create; truncating open -> overwrite-if;
	 * otherwise open-if (open existing or create).
	 */
	if (exclusive == EXCL)
		disp = NTCREATEX_DISP_CREATE;
	else {
		/* Truncate regular files if requested. */
		if ((va->va_type == VREG) &&
		    (va->va_mask & AT_SIZE) &&
		    (va->va_size == 0))
			disp = NTCREATEX_DISP_OVERWRITE_IF;
		else
			disp = NTCREATEX_DISP_OPEN_IF;
	}
	error = smbfs_smb_create(dnp, name, nmlen, &scred, &fid, disp, 0);
	if (error)
		goto out;

	/*
	 * XXX: Missing some code here to deal with
	 * the case where we opened an existing file,
	 * it's size is larger than 32-bits, and we're
	 * setting the size from a process that's not
	 * aware of large file offsets.  i.e.
	 * from the NFS3 code:
	 */
#if NOT_YET /* XXX */
	if ((vattr.va_mask & AT_SIZE) &&
	    vp->v_type == VREG) {
		np = VTOSMB(vp);
		/*
		 * Check here for large file handled
		 * by LF-unaware process (as
		 * ufs_create() does)
		 */
		if (!(lfaware & FOFFMAX)) {
			mutex_enter(&np->r_statelock);
			if (np->r_size > MAXOFF32_T)
				error = EOVERFLOW;
			mutex_exit(&np->r_statelock);
		}
		if (!error) {
			vattr.va_mask = AT_SIZE;
			error = smbfssetattr(vp,
			    &vattr, 0, cr);
		}
	}
#endif /* XXX */
	/*
	 * Should use the fid to get/set the size
	 * while we have it opened here.  See above.
	 */

	cerror = smbfs_smb_close(smi->smi_share, fid, NULL, &scred);
	if (cerror)
		SMBERROR("error %d closing %s\\%s\n",
		    cerror, dnp->n_rpath, name);

	/*
	 * In the open case, the name may differ a little
	 * from what we passed to create (case, etc.)
	 * so call lookup to get the (opened) name.
	 *
	 * NOTE: smbfs_smb_lookup may replace "name" with a newly
	 * allocated string; the "name != nm" check at out: frees it.
	 *
	 * XXX: Could avoid this extra lookup if the
	 * "createact" result from NT_CREATE says we
	 * created the object.
	 */
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
	if (error)
		goto out;

	/* update attr and directory cache */
	smbfs_attr_touchdir(dnp);

	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
	if (error)
		goto out;

#ifdef NOT_YET
	dnlc_update(dvp, name, vp);
	/* XXX invalidate pages if we truncated? */
#endif

	/* Success! */
	*vpp = vp;
	error = 0;

out:
	smb_credrele(&scred);
	if (name != nm)
		smbfs_name_free(name, nmlen);
	smbfs_rw_exit(&dnp->r_rwlock);
	return (error);
}

/*
 * XXX
 * This op should support the new FIGNORECASE flag for case-insensitive
 * lookups, per PSARC 2007/244.
1576*4bff34e3Sthurlow */ 1577*4bff34e3Sthurlow /* ARGSUSED */ 1578*4bff34e3Sthurlow static int 1579*4bff34e3Sthurlow smbfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct, 1580*4bff34e3Sthurlow int flags) 1581*4bff34e3Sthurlow { 1582*4bff34e3Sthurlow int error; 1583*4bff34e3Sthurlow vnode_t *vp; 1584*4bff34e3Sthurlow smbnode_t *np; 1585*4bff34e3Sthurlow smbnode_t *dnp; 1586*4bff34e3Sthurlow struct smb_cred scred; 1587*4bff34e3Sthurlow /* enum smbfsstat status; */ 1588*4bff34e3Sthurlow smbmntinfo_t *smi; 1589*4bff34e3Sthurlow 1590*4bff34e3Sthurlow smi = VTOSMI(dvp); 1591*4bff34e3Sthurlow 1592*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 1593*4bff34e3Sthurlow return (EPERM); 1594*4bff34e3Sthurlow 1595*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 1596*4bff34e3Sthurlow return (EIO); 1597*4bff34e3Sthurlow 1598*4bff34e3Sthurlow dnp = VTOSMB(dvp); 1599*4bff34e3Sthurlow if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp))) 1600*4bff34e3Sthurlow return (EINTR); 1601*4bff34e3Sthurlow 1602*4bff34e3Sthurlow /* 1603*4bff34e3Sthurlow * Verify access to the dirctory. 1604*4bff34e3Sthurlow */ 1605*4bff34e3Sthurlow error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct); 1606*4bff34e3Sthurlow if (error) 1607*4bff34e3Sthurlow goto out; 1608*4bff34e3Sthurlow 1609*4bff34e3Sthurlow /* 1610*4bff34e3Sthurlow * NOTE: the darwin code gets the "vp" passed in so it looks 1611*4bff34e3Sthurlow * like the "vp" has probably been "lookup"ed by the VFS layer. 1612*4bff34e3Sthurlow * It looks like we will need to lookup the vp to check the 1613*4bff34e3Sthurlow * caches and check if the object being deleted is a directory. 1614*4bff34e3Sthurlow */ 1615*4bff34e3Sthurlow error = smbfslookup(dvp, nm, &vp, cr, 0, ct); 1616*4bff34e3Sthurlow if (error) 1617*4bff34e3Sthurlow goto out; 1618*4bff34e3Sthurlow 1619*4bff34e3Sthurlow /* Never allow link/unlink directories on CIFS. 
*/ 1620*4bff34e3Sthurlow if (vp->v_type == VDIR) { 1621*4bff34e3Sthurlow VN_RELE(vp); 1622*4bff34e3Sthurlow error = EPERM; 1623*4bff34e3Sthurlow goto out; 1624*4bff34e3Sthurlow } 1625*4bff34e3Sthurlow 1626*4bff34e3Sthurlow #ifdef NOT_YET 1627*4bff34e3Sthurlow /* 1628*4bff34e3Sthurlow * First just remove the entry from the name cache, as it 1629*4bff34e3Sthurlow * is most likely the only entry for this vp. 1630*4bff34e3Sthurlow */ 1631*4bff34e3Sthurlow dnlc_remove(dvp, nm); 1632*4bff34e3Sthurlow 1633*4bff34e3Sthurlow /* 1634*4bff34e3Sthurlow * If the file has a v_count > 1 then there may be more than one 1635*4bff34e3Sthurlow * entry in the name cache due multiple links or an open file, 1636*4bff34e3Sthurlow * but we don't have the real reference count so flush all 1637*4bff34e3Sthurlow * possible entries. 1638*4bff34e3Sthurlow */ 1639*4bff34e3Sthurlow if (vp->v_count > 1) 1640*4bff34e3Sthurlow dnlc_purge_vp(vp); 1641*4bff34e3Sthurlow #endif /* NOT_YET */ 1642*4bff34e3Sthurlow 1643*4bff34e3Sthurlow /* 1644*4bff34e3Sthurlow * Now we have the real reference count on the vnode 1645*4bff34e3Sthurlow */ 1646*4bff34e3Sthurlow np = VTOSMB(vp); 1647*4bff34e3Sthurlow mutex_enter(&np->r_statelock); 1648*4bff34e3Sthurlow if (vp->v_count > 1) { 1649*4bff34e3Sthurlow /* 1650*4bff34e3Sthurlow * NFS does a rename on remove here. 1651*4bff34e3Sthurlow * Probably not applicable for SMB. 1652*4bff34e3Sthurlow * Like Darwin, just return EBUSY. 1653*4bff34e3Sthurlow * 1654*4bff34e3Sthurlow * XXX: Todo - Ask the server to set the 1655*4bff34e3Sthurlow * set the delete-on-close flag. 
1656*4bff34e3Sthurlow */ 1657*4bff34e3Sthurlow mutex_exit(&np->r_statelock); 1658*4bff34e3Sthurlow error = EBUSY; 1659*4bff34e3Sthurlow goto out; 1660*4bff34e3Sthurlow } else { 1661*4bff34e3Sthurlow mutex_exit(&np->r_statelock); 1662*4bff34e3Sthurlow 1663*4bff34e3Sthurlow smb_credinit(&scred, curproc, cr); 1664*4bff34e3Sthurlow error = smbfs_smb_delete(np, &scred, NULL, 0, 0); 1665*4bff34e3Sthurlow smb_credrele(&scred); 1666*4bff34e3Sthurlow 1667*4bff34e3Sthurlow } 1668*4bff34e3Sthurlow 1669*4bff34e3Sthurlow VN_RELE(vp); 1670*4bff34e3Sthurlow 1671*4bff34e3Sthurlow out: 1672*4bff34e3Sthurlow smbfs_rw_exit(&dnp->r_rwlock); 1673*4bff34e3Sthurlow 1674*4bff34e3Sthurlow return (error); 1675*4bff34e3Sthurlow } 1676*4bff34e3Sthurlow 1677*4bff34e3Sthurlow 1678*4bff34e3Sthurlow /* 1679*4bff34e3Sthurlow * XXX 1680*4bff34e3Sthurlow * This op should support the new FIGNORECASE flag for case-insensitive 1681*4bff34e3Sthurlow * lookups, per PSARC 2007/244. 1682*4bff34e3Sthurlow */ 1683*4bff34e3Sthurlow /* ARGSUSED */ 1684*4bff34e3Sthurlow static int 1685*4bff34e3Sthurlow smbfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr, 1686*4bff34e3Sthurlow caller_context_t *ct, int flags) 1687*4bff34e3Sthurlow { 1688*4bff34e3Sthurlow /* vnode_t *realvp; */ 1689*4bff34e3Sthurlow 1690*4bff34e3Sthurlow if (curproc->p_zone != VTOSMI(odvp)->smi_zone || 1691*4bff34e3Sthurlow curproc->p_zone != VTOSMI(ndvp)->smi_zone) 1692*4bff34e3Sthurlow return (EPERM); 1693*4bff34e3Sthurlow 1694*4bff34e3Sthurlow if (VTOSMI(odvp)->smi_flags & SMI_DEAD || 1695*4bff34e3Sthurlow VTOSMI(ndvp)->smi_flags & SMI_DEAD || 1696*4bff34e3Sthurlow odvp->v_vfsp->vfs_flag & VFS_UNMOUNTED || 1697*4bff34e3Sthurlow ndvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 1698*4bff34e3Sthurlow return (EIO); 1699*4bff34e3Sthurlow 1700*4bff34e3Sthurlow return (smbfsrename(odvp, onm, ndvp, nnm, cr, ct)); 1701*4bff34e3Sthurlow } 1702*4bff34e3Sthurlow 1703*4bff34e3Sthurlow /* 1704*4bff34e3Sthurlow * smbfsrename does the real work 
 * of renaming in SMBFS
 *
 * Caller (smbfs_rename) has already validated zone and mount state.
 * Takes both directories' r_rwlock as writer (ordered by node
 * address to avoid deadlock), verifies access, looks up source and
 * (optionally) target, rejects type mismatches and busy targets,
 * then issues the rename over the wire.
 */
/* ARGSUSED */
static int
smbfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
	caller_context_t *ct)
{
	int		error;
	int		nvp_locked = 0;	/* set once vn_vfsrlock(nvp) held */
	vnode_t		*nvp = NULL;	/* target, if it exists (held) */
	vnode_t		*ovp = NULL;	/* source (held) */
	smbnode_t	*onp;
	smbnode_t	*odnp;
	smbnode_t	*ndnp;
	struct smb_cred	scred;
	/* enum smbfsstat status; */

	ASSERT(curproc->p_zone == VTOSMI(odvp)->smi_zone);

	/* "." and ".." can never be renamed. */
	if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
	    strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
		return (EINVAL);

	/*
	 * Check that everything is on the same filesystem.
	 * vn_rename checks the fsid's, but in case we don't
	 * fill those in correctly, check here too.
	 */
	if (odvp->v_vfsp != ndvp->v_vfsp)
		return (EXDEV);

	odnp = VTOSMB(odvp);
	ndnp = VTOSMB(ndvp);

	/*
	 * Avoid deadlock here on old vs new directory nodes
	 * by always taking the locks in order of address.
	 * The order is arbitrary, but must be consistent.
	 */
	if (odnp < ndnp) {
		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
		    SMBINTR(odvp)))
			return (EINTR);
		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
		    SMBINTR(ndvp))) {
			smbfs_rw_exit(&odnp->r_rwlock);
			return (EINTR);
		}
	} else {
		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
		    SMBINTR(ndvp)))
			return (EINTR);
		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
		    SMBINTR(odvp))) {
			smbfs_rw_exit(&ndnp->r_rwlock);
			return (EINTR);
		}
	}
	/*
	 * No returns after this point (goto out)
	 */

	/*
	 * Need write access on source and target.
	 * Server takes care of most checks.
	 */
	error = smbfs_access(odvp, VWRITE|VEXEC, 0, cr, ct);
	if (error)
		goto out;
	if (odvp != ndvp) {
		error = smbfs_access(ndvp, VWRITE, 0, cr, ct);
		if (error)
			goto out;
	}

	/*
	 * Lookup the source name.  Must already exist.
	 */
	error = smbfslookup(odvp, onm, &ovp, cr, 0, ct);
	if (error)
		goto out;

	/*
	 * Lookup the target file.  If it exists, it needs to be
	 * checked to see whether it is a mount point and whether
	 * it is active (open).
	 */
	error = smbfslookup(ndvp, nnm, &nvp, cr, 0, ct);
	if (!error) {
		/*
		 * Target (nvp) already exists.  Check that it
		 * has the same type as the source.  The server
		 * will check this also, (and more reliably) but
		 * this lets us return the correct error codes.
		 */
		if (ovp->v_type == VDIR) {
			if (nvp->v_type != VDIR) {
				error = ENOTDIR;
				goto out;
			}
		} else {
			if (nvp->v_type == VDIR) {
				error = EISDIR;
				goto out;
			}
		}

		/*
		 * POSIX dictates that when the source and target
		 * entries refer to the same file object, rename
		 * must do nothing and exit without error.
		 */
		if (ovp == nvp) {
			error = 0;
			goto out;
		}

		/*
		 * Also must ensure the target is not a mount point,
		 * and keep mount/umount away until we're done.
		 */
		if (vn_vfsrlock(nvp)) {
			error = EBUSY;
			goto out;
		}
		nvp_locked = 1;
		if (vn_mountedvfs(nvp) != NULL) {
			error = EBUSY;
			goto out;
		}

#ifdef NOT_YET
		/*
		 * Purge the name cache of all references to this vnode
		 * so that we can check the reference count to infer
		 * whether it is active or not.
		 */
		/*
		 * First just remove the entry from the name cache, as it
		 * is most likely the only entry for this vp.
		 */
		dnlc_remove(ndvp, nnm);
		/*
		 * If the file has a v_count > 1 then there may be more
		 * than one entry in the name cache due multiple links
		 * or an open file, but we don't have the real reference
		 * count so flush all possible entries.
		 */
		if (nvp->v_count > 1)
			dnlc_purge_vp(nvp);
#endif

		if (nvp->v_count > 1 && nvp->v_type != VDIR) {
			/*
			 * The target file exists, is not the same as
			 * the source file, and is active.  Other FS
			 * implementations unlink the target here.
			 * For SMB, we don't assume we can remove an
			 * open file.  Return an error instead.
			 * Darwin returned an error here too.
			 */
			error = EEXIST;
			goto out;
		}
	} /* nvp */

#ifdef NOT_YET
	dnlc_remove(odvp, onm);
	dnlc_remove(ndvp, nnm);
#endif

	onp = VTOSMB(ovp);
	smb_credinit(&scred, curproc, cr);
	error = smbfs_smb_rename(onp, ndnp, nnm, strlen(nnm), &scred);
	smb_credrele(&scred);


out:
	/* Unwind in reverse order of acquisition. */
	if (nvp) {
		if (nvp_locked)
			vn_vfsunlock(nvp);
		VN_RELE(nvp);
	}
	if (ovp)
		VN_RELE(ovp);

	smbfs_rw_exit(&odnp->r_rwlock);
	smbfs_rw_exit(&ndnp->r_rwlock);

	return (error);
}

/*
 * XXX
 * vsecattr_t is new to build 77, and we need to eventually support
 * it in order to create an ACL when an object is created.
 *
 * This op should support the new FIGNORECASE flag for case-insensitive
 * lookups, per PSARC 2007/244.
 */
/*
 * Create a directory (VOP_MKDIR for smbfs).
 *
 * Creates "nm" under dvp over the wire, then looks the new name
 * back up (the server may canonicalize case) and returns a held
 * vnode in *vpp.  Dot-prefixed names get the DOS "hidden"
 * attribute set, best-effort.
 *
 * NOTE: smbfs_smb_lookup may replace "name" with a newly allocated
 * string; the "name != nm" check near the end frees it.
 */
/* ARGSUSED */
static int
smbfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp,
	cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
{
	vnode_t		*vp;
	struct smbnode	*dnp = VTOSMB(dvp);
	struct smbmntinfo *smi = VTOSMI(dvp);
	struct smb_cred	scred;
	struct smbfattr	fattr;
	const char	*name = (const char *) nm;
	int		nmlen = strlen(name);
	int		error, hiderr;

	if (curproc->p_zone != smi->smi_zone)
		return (EPERM);

	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* "." and ".." always exist; never create them. */
	if ((nmlen == 1 && name[0] == '.') ||
	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
		return (EEXIST);

	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
		return (EINTR);
	smb_credinit(&scred, curproc, cr);

	/*
	 * XXX: Do we need r_lkserlock too?
	 * No use of any shared fid or fctx...
	 */

	/*
	 * Require write access in the containing directory.
	 */
	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
	if (error)
		goto out;

	error = smbfs_smb_mkdir(dnp, name, nmlen, &scred);
	if (error)
		goto out;

	/* Re-lookup to get the server's (possibly case-folded) name. */
	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
	if (error)
		goto out;

	smbfs_attr_touchdir(dnp);

	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
	if (error)
		goto out;

#ifdef NOT_YET
	dnlc_update(dvp, name, vp);
#endif

	/* Dot-files get the DOS hidden attribute; failure is non-fatal. */
	if (name[0] == '.')
		if ((hiderr = smbfs_smb_hideit(VTOSMB(vp), NULL, 0, &scred)))
			SMBVDEBUG("hide failure %d\n", hiderr);

	/* Success! */
	*vpp = vp;
	error = 0;
out:
	smb_credrele(&scred);
	smbfs_rw_exit(&dnp->r_rwlock);

	if (name != nm)
		smbfs_name_free(name, nmlen);

	return (error);
}

/*
 * XXX
 * This op should support the new FIGNORECASE flag for case-insensitive
 * lookups, per PSARC 2007/244.
1983*4bff34e3Sthurlow */ 1984*4bff34e3Sthurlow /* ARGSUSED */ 1985*4bff34e3Sthurlow static int 1986*4bff34e3Sthurlow smbfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr, 1987*4bff34e3Sthurlow caller_context_t *ct, int flags) 1988*4bff34e3Sthurlow { 1989*4bff34e3Sthurlow vnode_t *vp = NULL; 1990*4bff34e3Sthurlow int vp_locked = 0; 1991*4bff34e3Sthurlow struct smbmntinfo *smi = VTOSMI(dvp); 1992*4bff34e3Sthurlow struct smbnode *dnp = VTOSMB(dvp); 1993*4bff34e3Sthurlow struct smbnode *np; 1994*4bff34e3Sthurlow struct smb_cred scred; 1995*4bff34e3Sthurlow int error; 1996*4bff34e3Sthurlow 1997*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 1998*4bff34e3Sthurlow return (EPERM); 1999*4bff34e3Sthurlow 2000*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 2001*4bff34e3Sthurlow return (EIO); 2002*4bff34e3Sthurlow 2003*4bff34e3Sthurlow if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp))) 2004*4bff34e3Sthurlow return (EINTR); 2005*4bff34e3Sthurlow smb_credinit(&scred, curproc, cr); 2006*4bff34e3Sthurlow 2007*4bff34e3Sthurlow /* 2008*4bff34e3Sthurlow * Require w/x access in the containing directory. 2009*4bff34e3Sthurlow * Server handles all other access checks. 2010*4bff34e3Sthurlow */ 2011*4bff34e3Sthurlow error = smbfs_access(dvp, VEXEC|VWRITE, 0, cr, ct); 2012*4bff34e3Sthurlow if (error) 2013*4bff34e3Sthurlow goto out; 2014*4bff34e3Sthurlow 2015*4bff34e3Sthurlow /* 2016*4bff34e3Sthurlow * First lookup the entry to be removed. 2017*4bff34e3Sthurlow */ 2018*4bff34e3Sthurlow error = smbfslookup(dvp, nm, &vp, cr, 0, ct); 2019*4bff34e3Sthurlow if (error) 2020*4bff34e3Sthurlow goto out; 2021*4bff34e3Sthurlow np = VTOSMB(vp); 2022*4bff34e3Sthurlow 2023*4bff34e3Sthurlow /* 2024*4bff34e3Sthurlow * Disallow rmdir of "." or current dir, or the FS root. 2025*4bff34e3Sthurlow * Also make sure it's a directory, not a mount point, 2026*4bff34e3Sthurlow * and lock to keep mount/umount away until we're done. 
2027*4bff34e3Sthurlow */ 2028*4bff34e3Sthurlow if ((vp == dvp) || (vp == cdir) || (vp->v_flag & VROOT)) { 2029*4bff34e3Sthurlow error = EINVAL; 2030*4bff34e3Sthurlow goto out; 2031*4bff34e3Sthurlow } 2032*4bff34e3Sthurlow if (vp->v_type != VDIR) { 2033*4bff34e3Sthurlow error = ENOTDIR; 2034*4bff34e3Sthurlow goto out; 2035*4bff34e3Sthurlow } 2036*4bff34e3Sthurlow if (vn_vfsrlock(vp)) { 2037*4bff34e3Sthurlow error = EBUSY; 2038*4bff34e3Sthurlow goto out; 2039*4bff34e3Sthurlow } 2040*4bff34e3Sthurlow vp_locked = 1; 2041*4bff34e3Sthurlow if (vn_mountedvfs(vp) != NULL) { 2042*4bff34e3Sthurlow error = EBUSY; 2043*4bff34e3Sthurlow goto out; 2044*4bff34e3Sthurlow } 2045*4bff34e3Sthurlow 2046*4bff34e3Sthurlow error = smbfs_smb_rmdir(np, &scred); 2047*4bff34e3Sthurlow if (error) 2048*4bff34e3Sthurlow goto out; 2049*4bff34e3Sthurlow 2050*4bff34e3Sthurlow mutex_enter(&np->r_statelock); 2051*4bff34e3Sthurlow dnp->n_flag |= NMODIFIED; 2052*4bff34e3Sthurlow mutex_exit(&np->r_statelock); 2053*4bff34e3Sthurlow smbfs_attr_touchdir(dnp); 2054*4bff34e3Sthurlow #ifdef NOT_YET 2055*4bff34e3Sthurlow dnlc_remove(dvp, nm); 2056*4bff34e3Sthurlow dnlc_purge_vp(vp); 2057*4bff34e3Sthurlow #endif 2058*4bff34e3Sthurlow smb_rmhash(np); 2059*4bff34e3Sthurlow 2060*4bff34e3Sthurlow out: 2061*4bff34e3Sthurlow if (vp) { 2062*4bff34e3Sthurlow if (vp_locked) 2063*4bff34e3Sthurlow vn_vfsunlock(vp); 2064*4bff34e3Sthurlow VN_RELE(vp); 2065*4bff34e3Sthurlow } 2066*4bff34e3Sthurlow smb_credrele(&scred); 2067*4bff34e3Sthurlow smbfs_rw_exit(&dnp->r_rwlock); 2068*4bff34e3Sthurlow 2069*4bff34e3Sthurlow return (error); 2070*4bff34e3Sthurlow } 2071*4bff34e3Sthurlow 2072*4bff34e3Sthurlow 2073*4bff34e3Sthurlow /* ARGSUSED */ 2074*4bff34e3Sthurlow static int 2075*4bff34e3Sthurlow smbfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp, 2076*4bff34e3Sthurlow caller_context_t *ct, int flags) 2077*4bff34e3Sthurlow { 2078*4bff34e3Sthurlow struct smbnode *np = VTOSMB(vp); 2079*4bff34e3Sthurlow int error = 0; 
2080*4bff34e3Sthurlow smbmntinfo_t *smi; 2081*4bff34e3Sthurlow 2082*4bff34e3Sthurlow smi = VTOSMI(vp); 2083*4bff34e3Sthurlow 2084*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 2085*4bff34e3Sthurlow return (EIO); 2086*4bff34e3Sthurlow 2087*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 2088*4bff34e3Sthurlow return (EIO); 2089*4bff34e3Sthurlow 2090*4bff34e3Sthurlow /* 2091*4bff34e3Sthurlow * Require read access in the directory. 2092*4bff34e3Sthurlow */ 2093*4bff34e3Sthurlow error = smbfs_access(vp, VREAD, 0, cr, ct); 2094*4bff34e3Sthurlow if (error) 2095*4bff34e3Sthurlow return (error); 2096*4bff34e3Sthurlow 2097*4bff34e3Sthurlow ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER)); 2098*4bff34e3Sthurlow 2099*4bff34e3Sthurlow /* 2100*4bff34e3Sthurlow * XXX: Todo readdir cache here 2101*4bff34e3Sthurlow * Note: NFS code is just below this. 2102*4bff34e3Sthurlow * 2103*4bff34e3Sthurlow * I am serializing the entire readdir opreation 2104*4bff34e3Sthurlow * now since we have not yet implemented readdir 2105*4bff34e3Sthurlow * cache. This fix needs to be revisited once 2106*4bff34e3Sthurlow * we implement readdir cache. 
2107*4bff34e3Sthurlow */ 2108*4bff34e3Sthurlow if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp))) 2109*4bff34e3Sthurlow return (EINTR); 2110*4bff34e3Sthurlow 2111*4bff34e3Sthurlow error = smbfs_readvdir(vp, uiop, cr, eofp, ct); 2112*4bff34e3Sthurlow 2113*4bff34e3Sthurlow smbfs_rw_exit(&np->r_lkserlock); 2114*4bff34e3Sthurlow 2115*4bff34e3Sthurlow return (error); 2116*4bff34e3Sthurlow } 2117*4bff34e3Sthurlow 2118*4bff34e3Sthurlow /* ARGSUSED */ 2119*4bff34e3Sthurlow static int 2120*4bff34e3Sthurlow smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, 2121*4bff34e3Sthurlow caller_context_t *ct) 2122*4bff34e3Sthurlow { 2123*4bff34e3Sthurlow size_t dbufsiz; 2124*4bff34e3Sthurlow struct dirent64 *dp; 2125*4bff34e3Sthurlow struct smb_cred scred; 2126*4bff34e3Sthurlow vnode_t *newvp; 2127*4bff34e3Sthurlow struct smbnode *np = VTOSMB(vp); 2128*4bff34e3Sthurlow int nmlen, reclen, error = 0; 2129*4bff34e3Sthurlow long offset, limit; 2130*4bff34e3Sthurlow struct smbfs_fctx *ctx; 2131*4bff34e3Sthurlow 2132*4bff34e3Sthurlow ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone); 2133*4bff34e3Sthurlow 2134*4bff34e3Sthurlow /* Make sure we serialize for n_dirseq use. */ 2135*4bff34e3Sthurlow ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER)); 2136*4bff34e3Sthurlow 2137*4bff34e3Sthurlow /* Min size is DIRENT64_RECLEN(256) rounded up. */ 2138*4bff34e3Sthurlow if (uio->uio_resid < 512 || uio->uio_offset < 0) 2139*4bff34e3Sthurlow return (EINVAL); 2140*4bff34e3Sthurlow 2141*4bff34e3Sthurlow /* 2142*4bff34e3Sthurlow * This dnlc_purge_vp ensures that name cache for this dir will be 2143*4bff34e3Sthurlow * current - it'll only have the items for which the smbfs_nget 2144*4bff34e3Sthurlow * MAKEENTRY happened. 
2145*4bff34e3Sthurlow */ 2146*4bff34e3Sthurlow #ifdef NOT_YET 2147*4bff34e3Sthurlow if (smbfs_fastlookup) 2148*4bff34e3Sthurlow dnlc_purge_vp(vp); 2149*4bff34e3Sthurlow #endif 2150*4bff34e3Sthurlow SMBVDEBUG("dirname='%s'\n", np->n_rpath); 2151*4bff34e3Sthurlow smb_credinit(&scred, curproc, cr); 2152*4bff34e3Sthurlow dbufsiz = DIRENT64_RECLEN(MAXNAMELEN); 2153*4bff34e3Sthurlow dp = kmem_alloc(dbufsiz, KM_SLEEP); 2154*4bff34e3Sthurlow 2155*4bff34e3Sthurlow offset = uio->uio_offset; /* NB: "cookie" */ 2156*4bff34e3Sthurlow limit = uio->uio_resid / DIRENT64_RECLEN(1); 2157*4bff34e3Sthurlow SMBVDEBUG("offset=0x%ld, limit=0x%ld\n", offset, limit); 2158*4bff34e3Sthurlow 2159*4bff34e3Sthurlow if (offset == 0) { 2160*4bff34e3Sthurlow /* Don't know EOF until findclose */ 2161*4bff34e3Sthurlow np->n_direof = -1; 2162*4bff34e3Sthurlow } else if (offset == np->n_direof) { 2163*4bff34e3Sthurlow /* Arrived at end of directory. */ 2164*4bff34e3Sthurlow goto out; 2165*4bff34e3Sthurlow } 2166*4bff34e3Sthurlow 2167*4bff34e3Sthurlow /* 2168*4bff34e3Sthurlow * Generate the "." and ".." entries here so we can 2169*4bff34e3Sthurlow * (1) make sure they appear (but only once), and 2170*4bff34e3Sthurlow * (2) deal with getting their I numbers which the 2171*4bff34e3Sthurlow * findnext below does only for normal names. 2172*4bff34e3Sthurlow */ 2173*4bff34e3Sthurlow while (limit && offset < 2) { 2174*4bff34e3Sthurlow limit--; 2175*4bff34e3Sthurlow reclen = DIRENT64_RECLEN(offset + 1); 2176*4bff34e3Sthurlow bzero(dp, reclen); 2177*4bff34e3Sthurlow /*LINTED*/ 2178*4bff34e3Sthurlow dp->d_reclen = reclen; 2179*4bff34e3Sthurlow /* Tricky: offset 0 is ".", offset 1 is ".." */ 2180*4bff34e3Sthurlow dp->d_name[0] = '.'; 2181*4bff34e3Sthurlow dp->d_name[1] = '.'; 2182*4bff34e3Sthurlow dp->d_name[offset + 1] = '\0'; 2183*4bff34e3Sthurlow /* 2184*4bff34e3Sthurlow * Want the real I-numbers for the "." and ".." 2185*4bff34e3Sthurlow * entries. 
For these two names, we know that 2186*4bff34e3Sthurlow * smbfslookup can do this all locally. 2187*4bff34e3Sthurlow */ 2188*4bff34e3Sthurlow error = smbfslookup(vp, dp->d_name, &newvp, cr, 1, ct); 2189*4bff34e3Sthurlow if (error) { 2190*4bff34e3Sthurlow dp->d_ino = np->n_ino + offset; /* fiction */ 2191*4bff34e3Sthurlow } else { 2192*4bff34e3Sthurlow dp->d_ino = VTOSMB(newvp)->n_ino; 2193*4bff34e3Sthurlow VN_RELE(newvp); 2194*4bff34e3Sthurlow } 2195*4bff34e3Sthurlow dp->d_off = offset + 1; /* see d_off below */ 2196*4bff34e3Sthurlow error = uiomove(dp, dp->d_reclen, UIO_READ, uio); 2197*4bff34e3Sthurlow if (error) 2198*4bff34e3Sthurlow goto out; 2199*4bff34e3Sthurlow uio->uio_offset = ++offset; 2200*4bff34e3Sthurlow } 2201*4bff34e3Sthurlow if (limit == 0) 2202*4bff34e3Sthurlow goto out; 2203*4bff34e3Sthurlow if (offset != np->n_dirofs || np->n_dirseq == NULL) { 2204*4bff34e3Sthurlow SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs); 2205*4bff34e3Sthurlow if (np->n_dirseq) { 2206*4bff34e3Sthurlow (void) smbfs_smb_findclose(np->n_dirseq, &scred); 2207*4bff34e3Sthurlow np->n_dirseq = NULL; 2208*4bff34e3Sthurlow } 2209*4bff34e3Sthurlow np->n_dirofs = 2; 2210*4bff34e3Sthurlow error = smbfs_smb_findopen(np, "*", 1, 2211*4bff34e3Sthurlow SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR, 2212*4bff34e3Sthurlow &scred, &ctx); 2213*4bff34e3Sthurlow if (error) { 2214*4bff34e3Sthurlow SMBVDEBUG("can not open search, error = %d", error); 2215*4bff34e3Sthurlow goto out; 2216*4bff34e3Sthurlow } 2217*4bff34e3Sthurlow np->n_dirseq = ctx; 2218*4bff34e3Sthurlow } else 2219*4bff34e3Sthurlow ctx = np->n_dirseq; 2220*4bff34e3Sthurlow while (np->n_dirofs < offset) { 2221*4bff34e3Sthurlow if (smbfs_smb_findnext(ctx, offset - np->n_dirofs++, 2222*4bff34e3Sthurlow &scred) != 0) { 2223*4bff34e3Sthurlow (void) smbfs_smb_findclose(np->n_dirseq, &scred); 2224*4bff34e3Sthurlow np->n_dirseq = NULL; 2225*4bff34e3Sthurlow np->n_direof = np->n_dirofs; 2226*4bff34e3Sthurlow np->n_dirofs = 0; 
2227*4bff34e3Sthurlow *eofp = 1; 2228*4bff34e3Sthurlow error = 0; 2229*4bff34e3Sthurlow goto out; 2230*4bff34e3Sthurlow } 2231*4bff34e3Sthurlow } 2232*4bff34e3Sthurlow error = 0; 2233*4bff34e3Sthurlow for (; limit; limit--) { 2234*4bff34e3Sthurlow error = smbfs_smb_findnext(ctx, limit, &scred); 2235*4bff34e3Sthurlow if (error) { 2236*4bff34e3Sthurlow if (error == EBADRPC) 2237*4bff34e3Sthurlow error = ENOENT; 2238*4bff34e3Sthurlow (void) smbfs_smb_findclose(np->n_dirseq, &scred); 2239*4bff34e3Sthurlow np->n_dirseq = NULL; 2240*4bff34e3Sthurlow np->n_direof = np->n_dirofs; 2241*4bff34e3Sthurlow np->n_dirofs = 0; 2242*4bff34e3Sthurlow *eofp = 1; 2243*4bff34e3Sthurlow error = 0; 2244*4bff34e3Sthurlow break; 2245*4bff34e3Sthurlow } 2246*4bff34e3Sthurlow np->n_dirofs++; 2247*4bff34e3Sthurlow /* Sanity check the name length. */ 2248*4bff34e3Sthurlow nmlen = ctx->f_nmlen; 2249*4bff34e3Sthurlow if (nmlen > (MAXNAMELEN - 1)) { 2250*4bff34e3Sthurlow nmlen = MAXNAMELEN - 1; 2251*4bff34e3Sthurlow SMBVDEBUG("Truncating name: %s\n", ctx->f_name); 2252*4bff34e3Sthurlow } 2253*4bff34e3Sthurlow reclen = DIRENT64_RECLEN(nmlen); 2254*4bff34e3Sthurlow if (uio->uio_resid < reclen) 2255*4bff34e3Sthurlow break; 2256*4bff34e3Sthurlow bzero(dp, reclen); 2257*4bff34e3Sthurlow /*LINTED*/ 2258*4bff34e3Sthurlow dp->d_reclen = reclen; 2259*4bff34e3Sthurlow dp->d_ino = ctx->f_attr.fa_ino; 2260*4bff34e3Sthurlow /* 2261*4bff34e3Sthurlow * Note: d_off is the offset that a user-level program 2262*4bff34e3Sthurlow * should seek to for reading the _next_ directory entry. 
2263*4bff34e3Sthurlow * See libc: readdir, telldir, seekdir 2264*4bff34e3Sthurlow */ 2265*4bff34e3Sthurlow dp->d_off = offset + 1; 2266*4bff34e3Sthurlow bcopy(ctx->f_name, dp->d_name, nmlen); 2267*4bff34e3Sthurlow dp->d_name[nmlen] = '\0'; 2268*4bff34e3Sthurlow #ifdef NOT_YET 2269*4bff34e3Sthurlow if (smbfs_fastlookup) { 2270*4bff34e3Sthurlow if (smbfs_nget(vp, ctx->f_name, 2271*4bff34e3Sthurlow ctx->f_nmlen, &ctx->f_attr, &newvp) == 0) 2272*4bff34e3Sthurlow VN_RELE(newvp); 2273*4bff34e3Sthurlow } 2274*4bff34e3Sthurlow #endif /* NOT_YET */ 2275*4bff34e3Sthurlow error = uiomove(dp, dp->d_reclen, UIO_READ, uio); 2276*4bff34e3Sthurlow if (error) 2277*4bff34e3Sthurlow break; 2278*4bff34e3Sthurlow uio->uio_offset = ++offset; 2279*4bff34e3Sthurlow } 2280*4bff34e3Sthurlow if (error == ENOENT) 2281*4bff34e3Sthurlow error = 0; 2282*4bff34e3Sthurlow out: 2283*4bff34e3Sthurlow kmem_free(dp, dbufsiz); 2284*4bff34e3Sthurlow smb_credrele(&scred); 2285*4bff34e3Sthurlow return (error); 2286*4bff34e3Sthurlow } 2287*4bff34e3Sthurlow 2288*4bff34e3Sthurlow 2289*4bff34e3Sthurlow /* 2290*4bff34e3Sthurlow * The pair of functions VOP_RWLOCK, VOP_RWUNLOCK 2291*4bff34e3Sthurlow * are optional functions that are called by: 2292*4bff34e3Sthurlow * getdents, before/after VOP_READDIR 2293*4bff34e3Sthurlow * pread, before/after ... VOP_READ 2294*4bff34e3Sthurlow * pwrite, before/after ... VOP_WRITE 2295*4bff34e3Sthurlow * (other places) 2296*4bff34e3Sthurlow * 2297*4bff34e3Sthurlow * Careful here: None of the above check for any 2298*4bff34e3Sthurlow * error returns from VOP_RWLOCK / VOP_RWUNLOCK! 2299*4bff34e3Sthurlow * In fact, the return value from _rwlock is NOT 2300*4bff34e3Sthurlow * an error code, but V_WRITELOCK_TRUE / _FALSE. 2301*4bff34e3Sthurlow * 2302*4bff34e3Sthurlow * Therefore, it's up to _this_ code to make sure 2303*4bff34e3Sthurlow * the lock state remains balanced, which means 2304*4bff34e3Sthurlow * we can't "bail out" on interrupts, etc. 
2305*4bff34e3Sthurlow */ 2306*4bff34e3Sthurlow 2307*4bff34e3Sthurlow /* ARGSUSED2 */ 2308*4bff34e3Sthurlow static int 2309*4bff34e3Sthurlow smbfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp) 2310*4bff34e3Sthurlow { 2311*4bff34e3Sthurlow smbnode_t *np = VTOSMB(vp); 2312*4bff34e3Sthurlow 2313*4bff34e3Sthurlow if (!write_lock) { 2314*4bff34e3Sthurlow (void) smbfs_rw_enter_sig(&np->r_rwlock, RW_READER, FALSE); 2315*4bff34e3Sthurlow return (V_WRITELOCK_FALSE); 2316*4bff34e3Sthurlow } 2317*4bff34e3Sthurlow 2318*4bff34e3Sthurlow 2319*4bff34e3Sthurlow (void) smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, FALSE); 2320*4bff34e3Sthurlow return (V_WRITELOCK_TRUE); 2321*4bff34e3Sthurlow } 2322*4bff34e3Sthurlow 2323*4bff34e3Sthurlow /* ARGSUSED */ 2324*4bff34e3Sthurlow static void 2325*4bff34e3Sthurlow smbfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp) 2326*4bff34e3Sthurlow { 2327*4bff34e3Sthurlow smbnode_t *np = VTOSMB(vp); 2328*4bff34e3Sthurlow 2329*4bff34e3Sthurlow smbfs_rw_exit(&np->r_rwlock); 2330*4bff34e3Sthurlow } 2331*4bff34e3Sthurlow 2332*4bff34e3Sthurlow 2333*4bff34e3Sthurlow /* ARGSUSED */ 2334*4bff34e3Sthurlow static int 2335*4bff34e3Sthurlow smbfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct) 2336*4bff34e3Sthurlow { 2337*4bff34e3Sthurlow smbmntinfo_t *smi; 2338*4bff34e3Sthurlow 2339*4bff34e3Sthurlow smi = VTOSMI(vp); 2340*4bff34e3Sthurlow 2341*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 2342*4bff34e3Sthurlow return (EPERM); 2343*4bff34e3Sthurlow 2344*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 2345*4bff34e3Sthurlow return (EIO); 2346*4bff34e3Sthurlow 2347*4bff34e3Sthurlow /* 2348*4bff34e3Sthurlow * Because we stuff the readdir cookie into the offset field 2349*4bff34e3Sthurlow * someone may attempt to do an lseek with the cookie which 2350*4bff34e3Sthurlow * we want to succeed. 
2351*4bff34e3Sthurlow */ 2352*4bff34e3Sthurlow if (vp->v_type == VDIR) 2353*4bff34e3Sthurlow return (0); 2354*4bff34e3Sthurlow 2355*4bff34e3Sthurlow /* Like NFS3, just check for 63-bit overflow. */ 2356*4bff34e3Sthurlow if (*noffp < 0) 2357*4bff34e3Sthurlow return (EINVAL); 2358*4bff34e3Sthurlow 2359*4bff34e3Sthurlow return (0); 2360*4bff34e3Sthurlow } 2361*4bff34e3Sthurlow 2362*4bff34e3Sthurlow 2363*4bff34e3Sthurlow /* 2364*4bff34e3Sthurlow * XXX 2365*4bff34e3Sthurlow * This op may need to support PSARC 2007/440, nbmand changes for CIFS Service. 2366*4bff34e3Sthurlow */ 2367*4bff34e3Sthurlow static int 2368*4bff34e3Sthurlow smbfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, 2369*4bff34e3Sthurlow offset_t offset, struct flk_callback *flk_cbp, cred_t *cr, 2370*4bff34e3Sthurlow caller_context_t *ct) 2371*4bff34e3Sthurlow { 2372*4bff34e3Sthurlow if (curproc->p_zone != VTOSMI(vp)->smi_zone) 2373*4bff34e3Sthurlow return (EIO); 2374*4bff34e3Sthurlow 2375*4bff34e3Sthurlow if (VTOSMI(vp)->smi_flags & SMI_LLOCK) 2376*4bff34e3Sthurlow return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct)); 2377*4bff34e3Sthurlow else 2378*4bff34e3Sthurlow return (ENOSYS); 2379*4bff34e3Sthurlow } 2380*4bff34e3Sthurlow 2381*4bff34e3Sthurlow /* 2382*4bff34e3Sthurlow * Free storage space associated with the specified vnode. The portion 2383*4bff34e3Sthurlow * to be freed is specified by bfp->l_start and bfp->l_len (already 2384*4bff34e3Sthurlow * normalized to a "whence" of 0). 2385*4bff34e3Sthurlow * 2386*4bff34e3Sthurlow * Called by fcntl(fd, F_FREESP, lkp) for libc:ftruncate, etc. 
2387*4bff34e3Sthurlow */ 2388*4bff34e3Sthurlow /* ARGSUSED */ 2389*4bff34e3Sthurlow static int 2390*4bff34e3Sthurlow smbfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag, 2391*4bff34e3Sthurlow offset_t offset, cred_t *cr, caller_context_t *ct) 2392*4bff34e3Sthurlow { 2393*4bff34e3Sthurlow int error; 2394*4bff34e3Sthurlow smbmntinfo_t *smi; 2395*4bff34e3Sthurlow 2396*4bff34e3Sthurlow smi = VTOSMI(vp); 2397*4bff34e3Sthurlow 2398*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 2399*4bff34e3Sthurlow return (EIO); 2400*4bff34e3Sthurlow 2401*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 2402*4bff34e3Sthurlow return (EIO); 2403*4bff34e3Sthurlow 2404*4bff34e3Sthurlow ASSERT(vp->v_type == VREG); 2405*4bff34e3Sthurlow if (cmd != F_FREESP) 2406*4bff34e3Sthurlow return (EINVAL); 2407*4bff34e3Sthurlow 2408*4bff34e3Sthurlow /* 2409*4bff34e3Sthurlow * Like NFS3, no 32-bit offset checks here. 2410*4bff34e3Sthurlow * Our SMB layer takes care to return EFBIG 2411*4bff34e3Sthurlow * when it has to fallback to a 32-bit call. 2412*4bff34e3Sthurlow */ 2413*4bff34e3Sthurlow 2414*4bff34e3Sthurlow error = convoff(vp, bfp, 0, offset); 2415*4bff34e3Sthurlow if (!error) { 2416*4bff34e3Sthurlow ASSERT(bfp->l_start >= 0); 2417*4bff34e3Sthurlow if (bfp->l_len == 0) { 2418*4bff34e3Sthurlow struct vattr va; 2419*4bff34e3Sthurlow 2420*4bff34e3Sthurlow /* 2421*4bff34e3Sthurlow * ftruncate should not change the ctime and 2422*4bff34e3Sthurlow * mtime if we truncate the file to its 2423*4bff34e3Sthurlow * previous size. 
2424*4bff34e3Sthurlow */ 2425*4bff34e3Sthurlow va.va_mask = AT_SIZE; 2426*4bff34e3Sthurlow error = smbfsgetattr(vp, &va, cr); 2427*4bff34e3Sthurlow if (error || va.va_size == bfp->l_start) 2428*4bff34e3Sthurlow return (error); 2429*4bff34e3Sthurlow va.va_mask = AT_SIZE; 2430*4bff34e3Sthurlow va.va_size = bfp->l_start; 2431*4bff34e3Sthurlow error = smbfssetattr(vp, &va, 0, cr); 2432*4bff34e3Sthurlow } else 2433*4bff34e3Sthurlow error = EINVAL; 2434*4bff34e3Sthurlow } 2435*4bff34e3Sthurlow 2436*4bff34e3Sthurlow return (error); 2437*4bff34e3Sthurlow } 2438*4bff34e3Sthurlow 2439*4bff34e3Sthurlow /* ARGSUSED */ 2440*4bff34e3Sthurlow static int 2441*4bff34e3Sthurlow smbfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr, 2442*4bff34e3Sthurlow caller_context_t *ct) 2443*4bff34e3Sthurlow { 2444*4bff34e3Sthurlow smbmntinfo_t *smi; 2445*4bff34e3Sthurlow struct smb_share *ssp; 2446*4bff34e3Sthurlow 2447*4bff34e3Sthurlow smi = VTOSMI(vp); 2448*4bff34e3Sthurlow 2449*4bff34e3Sthurlow if (curproc->p_zone != smi->smi_zone) 2450*4bff34e3Sthurlow return (EIO); 2451*4bff34e3Sthurlow 2452*4bff34e3Sthurlow if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED) 2453*4bff34e3Sthurlow return (EIO); 2454*4bff34e3Sthurlow 2455*4bff34e3Sthurlow switch (cmd) { 2456*4bff34e3Sthurlow case _PC_FILESIZEBITS: 2457*4bff34e3Sthurlow ssp = smi->smi_share; 2458*4bff34e3Sthurlow if (SSTOVC(ssp)->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES) 2459*4bff34e3Sthurlow *valp = 64; 2460*4bff34e3Sthurlow else 2461*4bff34e3Sthurlow *valp = 32; 2462*4bff34e3Sthurlow break; 2463*4bff34e3Sthurlow 2464*4bff34e3Sthurlow case _PC_LINK_MAX: 2465*4bff34e3Sthurlow /* We only ever report one link to an object */ 2466*4bff34e3Sthurlow *valp = 1; 2467*4bff34e3Sthurlow break; 2468*4bff34e3Sthurlow 2469*4bff34e3Sthurlow case _PC_SYMLINK_MAX: /* No symlinks until we do Unix extensions */ 2470*4bff34e3Sthurlow case _PC_ACL_ENABLED: /* No ACLs yet - see FILE_PERSISTENT_ACLS bit */ 2471*4bff34e3Sthurlow case 
_PC_XATTR_EXISTS: /* No xattrs yet */ 2472*4bff34e3Sthurlow *valp = 0; 2473*4bff34e3Sthurlow break; 2474*4bff34e3Sthurlow 2475*4bff34e3Sthurlow default: 2476*4bff34e3Sthurlow return (fs_pathconf(vp, cmd, valp, cr, ct)); 2477*4bff34e3Sthurlow } 2478*4bff34e3Sthurlow return (0); 2479*4bff34e3Sthurlow } 2480*4bff34e3Sthurlow 2481*4bff34e3Sthurlow 2482*4bff34e3Sthurlow 2483*4bff34e3Sthurlow /* 2484*4bff34e3Sthurlow * XXX 2485*4bff34e3Sthurlow * This op should eventually support PSARC 2007/268. 2486*4bff34e3Sthurlow */ 2487*4bff34e3Sthurlow static int 2488*4bff34e3Sthurlow smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr, 2489*4bff34e3Sthurlow caller_context_t *ct) 2490*4bff34e3Sthurlow { 2491*4bff34e3Sthurlow if (curproc->p_zone != VTOSMI(vp)->smi_zone) 2492*4bff34e3Sthurlow return (EIO); 2493*4bff34e3Sthurlow 2494*4bff34e3Sthurlow if (VTOSMI(vp)->smi_flags & SMI_LLOCK) 2495*4bff34e3Sthurlow return (fs_shrlock(vp, cmd, shr, flag, cr, ct)); 2496*4bff34e3Sthurlow else 2497*4bff34e3Sthurlow return (ENOSYS); 2498*4bff34e3Sthurlow } 2499