This page collects and organizes typical usage examples of the VI_UNLOCK function in C/C++. If you are wondering what exactly VI_UNLOCK does, how to call it, or what real-world uses look like, the curated examples below may help.
The following 15 code examples of VI_UNLOCK are shown, sorted by popularity by default.
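Before the individual examples, a minimal sketch of the recurring pattern most of them share may be useful. The helper name and error handling below are illustrative only, not taken from any of the sources that follow: take the vnode interlock, validate state, acquire a hold while the interlock is still held, then drop the interlock before doing heavier work.

static int
example_hold_pattern(struct vnode *vp)
{
	if (!VI_TRYLOCK(vp))		/* don't block waiting for the interlock */
		return (EBUSY);
	if (vp->v_usecount > 0) {	/* still referenced; give up */
		VI_UNLOCK(vp);		/* unlock on every early-return path */
		return (EBUSY);
	}
	vholdl(vp);			/* take a hold; interlock is still held */
	VI_UNLOCK(vp);			/* drop the interlock before heavier work */
	/* ... operate on the held vnode here ... */
	vdrop(vp);			/* release the hold */
	return (0);
}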
Example 1: osi_VM_FlushVCache
/* Try to discard pages, in order to recycle a vcache entry.
*
* We also make some sanity checks: ref count, open count, held locks.
*
* We also do some non-VM-related chores, such as releasing the cred pointer
* (for AIX and Solaris) and releasing the gnode (for AIX).
*
* Locking: afs_xvcache lock is held. If it is dropped and re-acquired,
* *slept should be set to warn the caller.
*
* Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*
*/
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
struct vnode *vp;
int code;
vp = AFSTOV(avc);
if (!VI_TRYLOCK(vp))
return EBUSY;
code = osi_fbsd_checkinuse(avc);
if (code) {
VI_UNLOCK(vp);
return code;
}
/* must hold the vnode before calling cache_purge()
* This code largely copied from vfs_subr.c:vlrureclaim() */
vholdl(vp);
VI_UNLOCK(vp);
AFS_GUNLOCK();
cache_purge(vp);
AFS_GLOCK();
vdrop(vp);
return 0;
}
Example 2: osi_TryEvictVCache
int
osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
{
struct vnode *vp;
int code;
vp = AFSTOV(avc);
if (!VI_TRYLOCK(vp))
return 0;
code = osi_fbsd_checkinuse(avc);
if (code != 0) {
VI_UNLOCK(vp);
return 0;
}
if ((vp->v_iflag & VI_DOOMED) != 0) {
VI_UNLOCK(vp);
return 1;
}
/* must hold the vnode before calling vgone()
* This code largely copied from vfs_subr.c:vlrureclaim() */
vholdl(vp);
AFS_GUNLOCK();
*slept = 1;
/* use the interlock while locking, so no one else can DOOM this */
ma_vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread);
vgone(vp);
MA_VOP_UNLOCK(vp, 0, curthread);
vdrop(vp);
AFS_GLOCK();
return 1;
}
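A usage note on *slept: a hypothetical caller (this sketch is not taken from OpenAFS) holds afs_xvcache around the call and must treat a set *slept as a warning that the global locks were dropped inside, so any state read before the call has to be revalidated:

static int
try_evict_one(struct vcache *avc)
{
	int slept = 0;
	int evicted;

	/* the caller holds afs_xvcache here */
	evicted = osi_TryEvictVCache(avc, &slept, 0);
	if (slept) {
		/* AFS_GUNLOCK()/AFS_GLOCK() ran inside: revalidate scan state */
	}
	return (evicted);
}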
Example 3: null_unlock
/*
* We need to process our own vnode unlock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
static int
null_unlock(struct vop_unlock_args *ap)
{
struct vnode *vp = ap->a_vp;
int flags = ap->a_flags;
int mtxlkflag = 0;
struct null_node *nn;
struct vnode *lvp;
int error;
if ((flags & LK_INTERLOCK) != 0)
mtxlkflag = 1;
else if (mtx_owned(VI_MTX(vp)) == 0) {
VI_LOCK(vp);
mtxlkflag = 2;
}
nn = VTONULL(vp);
if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
VI_LOCK_FLAGS(lvp, MTX_DUPOK);
flags |= LK_INTERLOCK;
vholdl(lvp);
VI_UNLOCK(vp);
error = VOP_UNLOCK(lvp, flags);
vdrop(lvp);
if (mtxlkflag == 0)
VI_LOCK(vp);
} else {
if (mtxlkflag == 2)
VI_UNLOCK(vp);
error = vop_stdunlock(ap);
}
return (error);
}
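For context, a minimal hedged sketch of the LK_INTERLOCK convention this function relies on (the wrapper name is hypothetical): a caller may hand its held vnode interlock to the unlock operation, which releases the interlock as part of the call, so the caller must not release it again.

static void
unlock_with_interlock(struct vnode *vp)
{
	VI_LOCK(vp);
	/* ... examine interlock-protected fields such as v_iflag ... */
	VOP_UNLOCK(vp, LK_INTERLOCK);	/* the interlock is released inside */
	/* do not VI_UNLOCK(vp) here; the flag transferred ownership */
}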
Example 4: osi_VM_TryToSmush
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
* try to free pages, when deleting a file.
*
* Locking: the vcache entry's lock is held. It may be dropped and
* re-obtained.
*
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
struct vnode *vp;
int tries, code;
int islocked;
vp = AFSTOV(avc);
VI_LOCK(vp);
if (vp->v_iflag & VI_DOOMED) {
VI_UNLOCK(vp);
return;
}
VI_UNLOCK(vp);
islocked = islocked_vnode(vp);
if (islocked == LK_EXCLOTHER)
panic("Trying to Smush over someone else's lock");
else if (islocked == LK_SHARED) {
afs_warn("Trying to Smush with a shared lock");
lock_vnode(vp, LK_UPGRADE);
} else if (!islocked)
lock_vnode(vp, LK_EXCLUSIVE);
if (vp->v_bufobj.bo_object != NULL) {
AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
/*
* Do we really want OBJPC_SYNC? OBJPC_INVAL would be
* faster, if invalidation is really what we are being
* asked to do. (It would make more sense, too, since
* otherwise this function is practically identical to
* osi_VM_StoreAllSegments().) -GAW
*/
/*
* Dunno. We no longer resemble osi_VM_StoreAllSegments,
* though maybe that's wrong, now. And OBJPC_SYNC is the
* common thing in 70 file systems, it seems. Matt.
*/
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
}
tries = 5;
code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
while (code && (tries > 0)) {
afs_warn("TryToSmush retrying vinvalbuf");
code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
--tries;
}
if (islocked == LK_SHARED)
lock_vnode(vp, LK_DOWNGRADE);
else if (!islocked)
unlock_vnode(vp);
}
Example 5: osi_VM_FlushVCache
/* Try to discard pages, in order to recycle a vcache entry.
*
* We also make some sanity checks: ref count, open count, held locks.
*
* We also do some non-VM-related chores, such as releasing the cred pointer
* (for AIX and Solaris) and releasing the gnode (for AIX).
*
* Locking: afs_xvcache lock is held. If it is dropped and re-acquired,
* *slept should be set to warn the caller.
*
* Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*
*/
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
struct vm_object *obj;
struct vnode *vp = AFSTOV(avc);
if (!VI_TRYLOCK(vp)) /* need interlock to check usecount */
return EBUSY;
if (vp->v_usecount > 0) {
VI_UNLOCK(vp);
return EBUSY;
}
/* XXX
* The value of avc->opens here came to be, at some point,
* typically -1. This was caused by incorrectly performing afs_close
* processing on vnodes being recycled */
if (avc->opens) {
VI_UNLOCK(vp);
return EBUSY;
}
/* if a lock is held, give up */
if (CheckLock(&avc->lock)) {
VI_UNLOCK(vp);
return EBUSY;
}
if ((vp->v_iflag & VI_DOOMED) != 0) {
VI_UNLOCK(vp);
return (0);
}
/* must hold the vnode before calling vgone()
* This code largely copied from vfs_subr.c:vlrureclaim() */
vholdl(vp);
AFS_GUNLOCK();
*slept = 1;
/* use the interlock while locking, so no one else can DOOM this */
ilock_vnode(vp);
vgone(vp);
unlock_vnode(vp);
vdrop(vp);
AFS_GLOCK();
return 0;
}
Example 6: vnode_isrecycled
/* is this vnode being recycled now? */
int vnode_isrecycled(vnode_t vp)
{
int ret;
VI_LOCK(vp);
ret = (vp->v_iflag & VI_DOOMED)? 1 : 0;
VI_UNLOCK(vp);
return(ret);
}
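A hypothetical usage sketch: because the interlock is released before the function returns, the answer is only advisory and can go stale immediately, so callers typically use it for early-out checks rather than as a guarantee.

static int
skip_if_recycled(vnode_t vp)
{
	if (vnode_isrecycled(vp))
		return (ENOENT);	/* already doomed; treat it as gone */
	/* ... proceed, still subject to normal vnode lifetime rules ... */
	return (0);
}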
Example 7: vn_rele_async
/*
* Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
* asynchronously using a taskq. This can avoid deadlocks caused by re-entering
* the file system as a result of releasing the vnode. Note, file systems
* already have to handle the race where the vnode is incremented before the
* inactive routine is called and does its locking.
*
* Warning: Excessive use of this routine can lead to performance problems.
* This is because taskqs throttle back allocation if too many are created.
*/
void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
VERIFY(vp->v_count > 0);
VI_LOCK(vp);
if (vp->v_count == 1 && !(vp->v_iflag & VI_DOINGINACT)) {
VI_UNLOCK(vp);
VERIFY(taskq_dispatch((taskq_t *)taskq,
(task_func_t *)vn_rele_inactive, vp, TQ_SLEEP) != 0);
return;
}
vp->v_usecount--;
vdropl(vp);
}
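A short hedged usage sketch (the function and taskq names are assumed, not from the source): a filesystem that must drop a vnode reference while holding its own locks can push the potentially re-entrant VOP_INACTIVE() work onto a taskq created elsewhere with taskq_create().

static void
drop_ref_while_locked(vnode_t *vp, taskq_t *rele_taskq)
{
	/*
	 * vn_rele() here could run VOP_INACTIVE() synchronously and
	 * re-enter the filesystem; the async variant defers that work.
	 */
	vn_rele_async(vp, rele_taskq);
}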
Example 8: qsync
/*
* Q_SYNC - sync quota files to disk.
*/
int
qsync(struct mount *mp)
{
struct ufsmount *ump = VFSTOUFS(mp);
struct thread *td = curthread; /* XXX */
struct vnode *vp, *mvp;
struct dquot *dq;
int i, error;
/*
* Check if the mount point has any quotas.
* If not, simply return.
*/
UFS_LOCK(ump);
for (i = 0; i < MAXQUOTAS; i++)
if (ump->um_quotas[i] != NULLVP)
break;
UFS_UNLOCK(ump);
if (i == MAXQUOTAS)
return (0);
/*
* Search vnodes associated with this mount point,
* synchronizing any modified dquot structures.
*/
again:
MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
if (vp->v_type == VNON) {
VI_UNLOCK(vp);
continue;
}
error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
if (error) {
if (error == ENOENT) {
MNT_VNODE_FOREACH_ACTIVE_ABORT(mp, mvp);
goto again;
}
continue;
}
for (i = 0; i < MAXQUOTAS; i++) {
dq = VTOI(vp)->i_dquot[i];
if (dq != NODQUOT)
dqsync(vp, dq);
}
vput(vp);
}
return (0);
}
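The loop above uses an interlock hand-off idiom worth calling out explicitly: MNT_VNODE_FOREACH_ACTIVE() yields each vnode with its interlock held, so the body must either VI_UNLOCK() to skip it or pass LK_INTERLOCK to vget(), which consumes the interlock. A minimal hedged sketch (function name hypothetical, error handling simplified; real loops also restart on ENOENT as qsync does):

static void
visit_active_vnodes(struct mount *mp, struct thread *td)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);		/* skipping: release the interlock */
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0)
			continue;		/* vget consumed the interlock */
		/* ... vp is locked and referenced here ... */
		vput(vp);
	}
}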
Example 9: null_lock
/*
* We need to process our own vnode lock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
static int
null_lock(struct vop_lock1_args *ap)
{
struct vnode *vp = ap->a_vp;
int flags = ap->a_flags;
struct null_node *nn;
struct vnode *lvp;
int error;
if ((flags & LK_INTERLOCK) == 0) {
VI_LOCK(vp);
ap->a_flags = flags |= LK_INTERLOCK;
}
nn = VTONULL(vp);
/*
* If we're still active we must ask the lower layer to
* lock as ffs has special lock considerations in its
* vop lock.
*/
if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
VI_LOCK_FLAGS(lvp, MTX_DUPOK);
VI_UNLOCK(vp);
/*
* We have to hold the vnode here to solve a potential
* reclaim race. If we're forcibly vgone'd while we
* still have refs, a thread could be sleeping inside
* the lowervp's vop_lock routine. When we vgone we will
* drop our last ref to the lowervp, which would allow it
* to be reclaimed. The lowervp could then be recycled,
* in which case it is not legal to be sleeping in its VOP.
* We prevent it from being recycled by holding the vnode
* here.
*/
vholdl(lvp);
error = VOP_LOCK(lvp, flags);
/*
* We might have slept to get the lock and someone might have
* cleaned our vnode already, switching the vnode lock from the one in
* lowervp to v_lock in our own vnode structure. Handle this
* case by reacquiring correct lock in requested mode.
*/
if (VTONULL(vp) == NULL && error == 0) {
ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
switch (flags & LK_TYPE_MASK) {
case LK_SHARED:
ap->a_flags |= LK_SHARED;
break;
case LK_UPGRADE:
case LK_EXCLUSIVE:
ap->a_flags |= LK_EXCLUSIVE;
break;
default:
panic("Unsupported lock request %d\n",
ap->a_flags);
}
VOP_UNLOCK(lvp, 0);
error = vop_stdlock(ap);
}
vdrop(lvp);
} else
error = vop_stdlock(ap);
return (error);
}
Example 10: vfs_mountroot_shuffle
static int
vfs_mountroot_shuffle(struct thread *td, struct mount *mpdevfs)
{
struct nameidata nd;
struct mount *mporoot, *mpnroot;
struct vnode *vp, *vporoot, *vpdevfs;
char *fspath;
int error;
mpnroot = TAILQ_NEXT(mpdevfs, mnt_list);
/* Shuffle the mountlist. */
mtx_lock(&mountlist_mtx);
mporoot = TAILQ_FIRST(&mountlist);
TAILQ_REMOVE(&mountlist, mpdevfs, mnt_list);
if (mporoot != mpdevfs) {
TAILQ_REMOVE(&mountlist, mpnroot, mnt_list);
TAILQ_INSERT_HEAD(&mountlist, mpnroot, mnt_list);
}
TAILQ_INSERT_TAIL(&mountlist, mpdevfs, mnt_list);
mtx_unlock(&mountlist_mtx);
cache_purgevfs(mporoot);
if (mporoot != mpdevfs)
cache_purgevfs(mpdevfs);
VFS_ROOT(mporoot, LK_EXCLUSIVE, &vporoot);
VI_LOCK(vporoot);
vporoot->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vporoot);
vporoot->v_mountedhere = NULL;
mporoot->mnt_flag &= ~MNT_ROOTFS;
mporoot->mnt_vnodecovered = NULL;
vput(vporoot);
/* Set up the new rootvnode, and purge the cache */
mpnroot->mnt_vnodecovered = NULL;
set_rootvnode();
cache_purgevfs(rootvnode->v_mount);
if (mporoot != mpdevfs) {
/* Remount old root under /.mount or /mnt */
fspath = "/.mount";
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
fspath, td);
error = namei(&nd);
if (error) {
NDFREE(&nd, NDF_ONLY_PNBUF);
fspath = "/mnt";
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
fspath, td);
error = namei(&nd);
}
if (!error) {
vp = nd.ni_vp;
error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
if (!error)
error = vinvalbuf(vp, V_SAVE, 0, 0);
if (!error) {
cache_purge(vp);
mporoot->mnt_vnodecovered = vp;
vp->v_mountedhere = mporoot;
strlcpy(mporoot->mnt_stat.f_mntonname,
fspath, MNAMELEN);
VOP_UNLOCK(vp, 0);
} else
vput(vp);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error && bootverbose)
printf("mountroot: unable to remount previous root "
"under /.mount or /mnt (error %d).\n", error);
}
/* Remount devfs under /dev */
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, "/dev", td);
error = namei(&nd);
if (!error) {
vp = nd.ni_vp;
error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
if (!error)
error = vinvalbuf(vp, V_SAVE, 0, 0);
if (!error) {
vpdevfs = mpdevfs->mnt_vnodecovered;
if (vpdevfs != NULL) {
cache_purge(vpdevfs);
vpdevfs->v_mountedhere = NULL;
vrele(vpdevfs);
}
mpdevfs->mnt_vnodecovered = vp;
vp->v_mountedhere = mpdevfs;
VOP_UNLOCK(vp, 0);
} else
vput(vp);
}
if (error && bootverbose)
printf("mountroot: unable to remount devfs under /dev "
"(error %d).\n", error);
//......... remainder of the code omitted .........
Example 11: quotaoff1
/*
* Main code to turn off disk quotas for a filesystem. Does not change
* flags.
*/
static int
quotaoff1(struct thread *td, struct mount *mp, int type)
{
struct vnode *vp;
struct vnode *qvp, *mvp;
struct ufsmount *ump;
struct dquot *dq;
struct inode *ip;
struct ucred *cr;
int error;
ump = VFSTOUFS(mp);
UFS_LOCK(ump);
KASSERT((ump->um_qflags[type] & QTF_CLOSING) != 0,
("quotaoff1: flags are invalid"));
if ((qvp = ump->um_quotas[type]) == NULLVP) {
UFS_UNLOCK(ump);
return (0);
}
cr = ump->um_cred[type];
UFS_UNLOCK(ump);
/*
* Search vnodes associated with this mount point,
* deleting any references to quota file being closed.
*/
again:
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
if (vp->v_type == VNON) {
VI_UNLOCK(vp);
continue;
}
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
goto again;
}
ip = VTOI(vp);
dq = ip->i_dquot[type];
ip->i_dquot[type] = NODQUOT;
dqrele(vp, dq);
VOP_UNLOCK(vp, 0);
vrele(vp);
}
error = dqflush(qvp);
if (error != 0)
return (error);
/*
* Clear um_quotas before closing the quota vnode to prevent
* access to the closed vnode from dqget/dqsync
*/
UFS_LOCK(ump);
ump->um_quotas[type] = NULLVP;
ump->um_cred[type] = NOCRED;
UFS_UNLOCK(ump);
vn_lock(qvp, LK_EXCLUSIVE | LK_RETRY);
qvp->v_vflag &= ~VV_SYSTEM;
VOP_UNLOCK(qvp, 0);
error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
crfree(cr);
return (error);
}
Example 12: unionfs_noderem
/*
* Clean up the unionfs node.
*/
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
int count;
struct unionfs_node *unp, *unp_t1, *unp_t2;
struct unionfs_node_hashhead *hd;
struct unionfs_node_status *unsp, *unsp_tmp;
struct vnode *lvp;
struct vnode *uvp;
struct vnode *dvp;
/*
* Use the interlock to protect the clearing of v_data to
* prevent faults in unionfs_lock().
*/
VI_LOCK(vp);
unp = VTOUNIONFS(vp);
lvp = unp->un_lowervp;
uvp = unp->un_uppervp;
dvp = unp->un_dvp;
unp->un_lowervp = unp->un_uppervp = NULLVP;
vp->v_vnlock = &(vp->v_lock);
vp->v_data = NULL;
vp->v_object = NULL;
VI_UNLOCK(vp);
if (lvp != NULLVP)
VOP_UNLOCK(lvp, LK_RELEASE);
if (uvp != NULLVP)
VOP_UNLOCK(uvp, LK_RELEASE);
if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
unionfs_rem_cached_vnode(unp, dvp);
if (lockmgr(vp->v_vnlock, LK_EXCLUSIVE, VI_MTX(vp)) != 0)
panic("the lock for deletion is unacquirable.");
if (lvp != NULLVP)
vrele(lvp);
if (uvp != NULLVP)
vrele(uvp);
if (dvp != NULLVP) {
vrele(dvp);
unp->un_dvp = NULLVP;
}
if (unp->un_path != NULL) {
free(unp->un_path, M_UNIONFSPATH);
unp->un_path = NULL;
}
if (unp->un_hashtbl != NULL) {
for (count = 0; count <= unp->un_hashmask; count++) {
hd = unp->un_hashtbl + count;
LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
LIST_REMOVE(unp_t1, un_hash);
unp_t1->un_hash.le_next = NULL;
unp_t1->un_hash.le_prev = NULL;
}
}
hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
}
//......... remainder of the code omitted .........
Example 13: msdosfs_unmount
/*
* Unmount the filesystem described by mp.
*/
static int
msdosfs_unmount(struct mount *mp, int mntflags)
{
struct msdosfsmount *pmp;
int error, flags;
error = flags = 0;
pmp = VFSTOMSDOSFS(mp);
if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0)
error = msdosfs_sync(mp, MNT_WAIT);
if ((mntflags & MNT_FORCE) != 0)
flags |= FORCECLOSE;
else if (error != 0)
return (error);
error = vflush(mp, 0, flags, curthread);
if (error != 0 && error != ENXIO)
return (error);
if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0) {
error = markvoldirty(pmp, 0);
if (error && error != ENXIO) {
(void)markvoldirty(pmp, 1);
return (error);
}
}
if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) {
if (pmp->pm_w2u)
msdosfs_iconv->close(pmp->pm_w2u);
if (pmp->pm_u2w)
msdosfs_iconv->close(pmp->pm_u2w);
if (pmp->pm_d2u)
msdosfs_iconv->close(pmp->pm_d2u);
if (pmp->pm_u2d)
msdosfs_iconv->close(pmp->pm_u2d);
}
#ifdef MSDOSFS_DEBUG
{
struct vnode *vp = pmp->pm_devvp;
struct bufobj *bo;
bo = &vp->v_bufobj;
BO_LOCK(bo);
VI_LOCK(vp);
vn_printf(vp,
"msdosfs_umount(): just before calling VOP_CLOSE()\n");
printf("freef %p, freeb %p, mount %p\n",
TAILQ_NEXT(vp, v_actfreelist), vp->v_actfreelist.tqe_prev,
vp->v_mount);
printf("cleanblkhd %p, dirtyblkhd %p, numoutput %ld, type %d\n",
TAILQ_FIRST(&vp->v_bufobj.bo_clean.bv_hd),
TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd),
vp->v_bufobj.bo_numoutput, vp->v_type);
VI_UNLOCK(vp);
BO_UNLOCK(bo);
}
#endif
DROP_GIANT();
if (pmp->pm_devvp->v_type == VCHR && pmp->pm_devvp->v_rdev != NULL)
pmp->pm_devvp->v_rdev->si_mountpt = NULL;
g_topology_lock();
g_vfs_close(pmp->pm_cp);
g_topology_unlock();
PICKUP_GIANT();
vrele(pmp->pm_devvp);
dev_rel(pmp->pm_dev);
free(pmp->pm_inusemap, M_MSDOSFSFAT);
if (pmp->pm_flags & MSDOSFS_LARGEFS)
msdosfs_fileno_free(mp);
lockdestroy(&pmp->pm_fatlock);
free(pmp, M_MSDOSFSMNT);
mp->mnt_data = NULL;
MNT_ILOCK(mp);
mp->mnt_flag &= ~MNT_LOCAL;
MNT_IUNLOCK(mp);
return (error);
}
Example 14: gfs_file_inactive
/*
* gfs_file_inactive()
*
* Called from the VOP_INACTIVE() routine. If necessary, this routine will
* remove the given vnode from the parent directory and clean up any references
* in the VFS layer.
*
* If the vnode was not removed (due to a race with vget), then NULL is
* returned. Otherwise, a pointer to the private data is returned.
*/
void *
gfs_file_inactive(struct vnode *vp)
{
int i;
gfs_dirent_t *ge = NULL;
gfs_file_t *fp = vnode_fsnode(vp);
gfs_dir_t *dp = NULL;
void *data;
if (!fp) return NULL;
if (fp->gfs_parent == NULL /*|| (vp->v_flag & V_XATTRDIR)*/)
goto found;
/*
* XXX cope with a FreeBSD-specific race wherein the parent's
* snapshot data can be freed before the parent is
*/
if ((dp = vnode_fsnode(fp->gfs_parent)) == NULL)
return (NULL);
/*
* First, see if this vnode is cached in the parent.
*/
gfs_dir_lock(dp);
/*
* Find it in the set of static entries.
*/
for (i = 0; i < dp->gfsd_nstatic; i++) {
ge = &dp->gfsd_static[i];
if (ge->gfse_vnode == vp)
goto found;
}
/*
* If 'ge' is NULL, then it is a dynamic entry.
*/
ge = NULL;
found:
#ifdef TODO
if (vp->v_flag & V_XATTRDIR)
VI_LOCK(fp->gfs_parent);
#endif
VN_HOLD(vp);
/*
* Really remove this vnode
*/
data = vnode_fsnode(vp);
if (ge != NULL) {
/*
* If this was a statically cached entry, simply set the
* cached vnode to NULL.
*/
ge->gfse_vnode = NULL;
}
VN_RELE(vp);
/*
* Free vnode and release parent
*/
dprintf("freeing vp %p and parent %p\n", vp, fp->gfs_parent);
if (fp->gfs_parent) {
if (dp)
gfs_dir_unlock(dp);
//VOP_UNLOCK(vp, 0);
VN_RELE(fp->gfs_parent);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
} else {
ASSERT(vp->v_vfsp != NULL);
VFS_RELE(vp->v_vfsp);
}
#ifdef TODO
if (vp->v_flag & V_XATTRDIR)
VI_UNLOCK(fp->gfs_parent);
#endif
return (data);
}
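A hypothetical caller sketch modeled on common VOP_INACTIVE() handlers (the kmem_free() sizing through gfs_size follows the usual gfs convention, but verify it against your tree): free the private data only when gfs_file_inactive() actually detached it.

static void
example_inactive(struct vnode *vp)
{
	void *data;

	data = gfs_file_inactive(vp);
	if (data != NULL)
		kmem_free(data, ((gfs_file_t *)data)->gfs_size);
}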
Example 15: mount_snapshot
int
mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
char *fspec, int fsflags)
{
struct vfsconf *vfsp;
struct mount *mp;
vnode_t *vp, *mvp;
struct ucred *cr;
int error;
/*
* Be ultra-paranoid about making sure the type and fspath
* variables will fit in our mp buffers, including the
* terminating NUL.
*/
if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
return (ENAMETOOLONG);
vfsp = vfs_byname_kld(fstype, td, &error);
if (vfsp == NULL)
return (ENODEV);
vp = *vpp;
if (vp->v_type != VDIR)
return (ENOTDIR);
/*
* We need vnode lock to protect v_mountedhere and vnode interlock
* to protect v_iflag.
*/
vn_lock(vp, LK_SHARED | LK_RETRY);
VI_LOCK(vp);
if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
VI_UNLOCK(vp);
VOP_UNLOCK(vp, 0);
return (EBUSY);
}
vp->v_iflag |= VI_MOUNT;
VI_UNLOCK(vp);
VOP_UNLOCK(vp, 0);
/*
* Allocate and initialize the filesystem.
* We don't want the regular user that triggered the snapshot mount to be
* able to unmount it, so pass the credentials of the parent mount.
*/
mp = vfs_mount_alloc(vp, vfsp, fspath, vp->v_mount->mnt_cred);
mp->mnt_optnew = NULL;
vfs_setmntopt(mp, "from", fspec, 0);
mp->mnt_optnew = mp->mnt_opt;
mp->mnt_opt = NULL;
/*
* Set the mount level flags.
*/
mp->mnt_flag = fsflags & MNT_UPDATEMASK;
/*
* Snapshots are always read-only.
*/
mp->mnt_flag |= MNT_RDONLY;
/*
* We don't want snapshots to allow access to vulnerable setuid
* programs, so we turn off setuid when mounting snapshots.
*/
mp->mnt_flag |= MNT_NOSUID;
/*
* We don't want snapshots to be visible in regular
* mount(8) and df(1) output.
*/
mp->mnt_flag |= MNT_IGNORE;
/*
* XXX: This is evil, but we can't mount a snapshot as a regular user.
* XXX: Is it safe when the snapshot is mounted from within a jail?
*/
cr = td->td_ucred;
td->td_ucred = kcred;
error = VFS_MOUNT(mp);
td->td_ucred = cr;
if (error != 0) {
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
vrele(vp);
vfs_unbusy(mp);
vfs_mount_destroy(mp);
*vpp = NULL;
return (error);
}
if (mp->mnt_opt != NULL)
vfs_freeopts(mp->mnt_opt);
mp->mnt_opt = mp->mnt_optnew;
(void)VFS_STATFS(mp, &mp->mnt_stat);
/*
* Prevent external consumers of mount options from reading
* mnt_optnew.
*/
mp->mnt_optnew = NULL;
//......... remainder of the code omitted .........