本文整理汇总了C++中VN_HOLD函数的典型用法代码示例。如果您正苦于以下问题:C++ VN_HOLD函数的具体用法?C++ VN_HOLD怎么用?C++ VN_HOLD使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了VN_HOLD函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: smb_lookuppathvptovp
/*
 * Resolve 'path' to a vnode, starting the walk at 'startvp' and using
 * 'rootvp' as the root of the lookup.  Symlinks are followed, and the
 * lookup is case-insensitive when the SMB tree requires it.
 *
 * Returns a held vnode on success (caller must VN_RELE it), or NULL
 * on failure.
 */
vnode_t *
smb_lookuppathvptovp(smb_request_t *sr, char *path, vnode_t *startvp,
vnode_t *rootvp)
{
pathname_t pn;
vnode_t *vp = NULL;
int lookup_flags = FOLLOW;
if (SMB_TREE_IS_CASEINSENSITIVE(sr))
lookup_flags |= FIGNORECASE;
(void) pn_alloc(&pn);
if (pn_set(&pn, path) == 0) {
/* lookuppnvp() requires holds on startvp and (non-rootdir) rootvp */
VN_HOLD(startvp);
/* rootdir never needs an explicit hold */
if (rootvp != rootdir)
VN_HOLD(rootvp);
/* lookuppnvp should release the holds */
if (lookuppnvp(&pn, NULL, lookup_flags, NULL, &vp,
rootvp, startvp, kcred) != 0) {
pn_free(&pn);
return (NULL);
}
}
pn_free(&pn);
return (vp);
}
示例2: r4find
/*
 * Lookup a rnode by fhandle. Ignores rnodes that had failed recovery.
 * Returns NULL if no match. If an rnode is returned, the reference count
 * on the master vnode is incremented.
 *
 * The caller must be holding the hash queue lock, either shared or exclusive.
 */
rnode4_t *
r4find(r4hashq_t *rhtp, nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
rnode4_t *rp;
vnode_t *vp;
ASSERT(RW_LOCK_HELD(&rhtp->r_lock));
/* walk this hash bucket; the bucket head itself is the list sentinel */
for (rp = rhtp->r_hashf; rp != (rnode4_t *)rhtp; rp = rp->r_hashf) {
vp = RTOV4(rp);
if (vp->v_vfsp == vfsp && SFH4_SAME(rp->r_fh, fh)) {
/* skip rnodes whose recovery failed */
mutex_enter(&rp->r_statelock);
if (rp->r_flags & R4RECOVERR) {
mutex_exit(&rp->r_statelock);
continue;
}
mutex_exit(&rp->r_statelock);
#ifdef DEBUG
r4_dup_check(rp, vfsp);
#endif
if (rp->r_freef != NULL) {
mutex_enter(&rp4freelist_lock);
/*
 * If the rnode is on the freelist,
 * then remove it and use that reference
 * as the new reference. Otherwise,
 * need to increment the reference count.
 */
if (rp->r_freef != NULL) {
/* re-checked under rp4freelist_lock: still on the freelist */
rp4_rmfree(rp);
mutex_exit(&rp4freelist_lock);
} else {
/* raced off the freelist; take a fresh hold instead */
mutex_exit(&rp4freelist_lock);
VN_HOLD(vp);
}
} else
VN_HOLD(vp);
/*
 * if root vnode, set v_flag to indicate that
 */
if (isrootfh(fh, rp)) {
if (!(vp->v_flag & VROOT)) {
mutex_enter(&vp->v_lock);
vp->v_flag |= VROOT;
mutex_exit(&vp->v_lock);
}
}
return (rp);
}
}
return (NULL);
}
示例3: xfs_cap_vset
/*
 * Store POSIX capabilities on a vnode.
 *
 * 'cap'/'size' carry the xattr-format capability blob from the VFS
 * layer; it is converted to the XFS on-disk representation and written
 * to the SGI_CAP_LINUX root attribute.  Returns 0 on success or a
 * negative errno (Linux convention).
 */
int
xfs_cap_vset(
vnode_t *vp,
void *cap,
size_t size)
{
posix_cap_xattr *xattr_cap = cap;
xfs_cap_set_t xfs_cap;
int error;
if (!cap)
return -EINVAL;
/* convert xattr format -> xfs_cap_set_t; 'error' is a positive errno */
error = posix_cap_xattr_to_xfs(xattr_cap, size, &xfs_cap);
if (error)
return -error;
/* keep the vnode referenced across the permission check and attr set */
VN_HOLD(vp);
error = xfs_cap_allow_set(vp);
if (error)
goto out;
/* VOP_ATTR_SET is a statement macro; its status comes back in 'error' */
VOP_ATTR_SET(vp, SGI_CAP_LINUX, (char *)&xfs_cap,
sizeof(xfs_cap_set_t), ATTR_ROOT, sys_cred, error);
out:
VN_RELE(vp);
return -error;
}
示例4: dev_lopen
/*
 * New Leaf driver open entry point. We make a vnode and go through specfs
 * in order to obtain open close exclusions guarantees. Note that we drop
 * OTYP_LYR if it was specified - we are going through specfs and it provides
 * last close semantics (FKLYR is provided to open(9E)). Also, since
 * spec_open will drive attach via e_ddi_hold_devi_by_dev for a makespecvp
 * vnode with no SDIP_SET on the common snode, the dev_lopen caller no longer
 * needs to call ddi_hold_installed_driver.
 */
int
dev_lopen(dev_t *devp, int flag, int otype, struct cred *cred)
{
struct vnode *vp;
int error;
struct vnode *cvp;
/* shadow specfs vnode for the device; vnode type follows the open type */
vp = makespecvp(*devp, (otype == OTYP_BLK) ? VBLK : VCHR);
/* FKLYR marks this as a layered open for the driver's open(9E) */
error = VOP_OPEN(&vp, flag | FKLYR, cred, NULL);
if (error == 0) {
/* Pick up the (possibly) new dev_t value. */
*devp = vp->v_rdev;
/*
 * Place extra hold on the common vnode, which contains the
 * open count, so that it is not destroyed by the VN_RELE of
 * the shadow makespecvp vnode below.
 */
cvp = STOV(VTOCS(vp));
VN_HOLD(cvp);
}
/* release the shadow makespecvp vnode. */
VN_RELE(vp);
return (error);
}
示例5: xfs_cap_vget
/*
 * Read the POSIX capabilities of a vnode into an xattr-format buffer.
 *
 * When size == 0 only the required buffer size is probed
 * (ATTR_KERNOVAL, no data copied); otherwise the capability set is
 * converted into 'cap'.  Returns a negative errno on failure
 * (Linux convention).
 */
int
xfs_cap_vget(
vnode_t *vp,
void *cap,
size_t size)
{
int error;
int len = sizeof(xfs_cap_set_t);
int flags = ATTR_ROOT;
xfs_cap_set_t xfs_cap = { 0 };
posix_cap_xattr *xattr_cap = cap;
char *data = (char *)&xfs_cap;
/* hold the vnode across the MAC access check and attribute read */
VN_HOLD(vp);
if ((error = _MAC_VACCESS(vp, NULL, VREAD)))
goto out;
if (!size) {
/* caller only wants the size; don't copy any data out */
flags |= ATTR_KERNOVAL;
data = NULL;
}
/* VOP_ATTR_GET is a statement macro; its status comes back in 'error' */
VOP_ATTR_GET(vp, SGI_CAP_LINUX, data, &len, flags, sys_cred, error);
if (error)
goto out;
ASSERT(len == sizeof(xfs_cap_set_t));
/* size == 0: report required xattr size; else convert into 'cap' */
error = (size)? -posix_cap_xattr_size() :
-posix_cap_xfs_to_xattr(&xfs_cap, xattr_cap, size);
out:
VN_RELE(vp);
return -error;
}
示例6: zfsctl_root
/*
 * Return the .zfs control directory vnode hanging off the filesystem
 * that owns 'zp'.  An extra hold is placed on the vnode for the
 * caller, who must eventually VN_RELE() it.
 */
vnode_t *
zfsctl_root(znode_t *zp)
{
	vnode_t *ctlvp;

	ASSERT(zfs_has_ctldir(zp));
	ctlvp = zp->z_zfsvfs->z_ctldir;
	VN_HOLD(ctlvp);
	return (ctlvp);
}
示例7: linvfs_link
/*
 * Linux VFS ->link() entry for XFS: create hard link 'dentry' in
 * directory 'dir' pointing at the inode behind 'old_dentry'.
 * Returns 0 or a negative errno.
 */
STATIC int
linvfs_link(
struct dentry *old_dentry,
struct inode *dir,
struct dentry *dentry)
{
struct inode *ip; /* inode of guy being linked to */
vnode_t *tdvp; /* target directory for new name/link */
vnode_t *vp; /* vp of name being linked */
int error;
ip = old_dentry->d_inode; /* inode being linked to */
/* hard links to directories are not allowed */
if (S_ISDIR(ip->i_mode))
return -EPERM;
tdvp = LINVFS_GET_VP(dir);
vp = LINVFS_GET_VP(ip);
/* VOP_LINK is a statement macro; its status comes back in 'error' */
VOP_LINK(tdvp, vp, dentry, NULL, error);
if (!error) {
VMODIFY(tdvp);
/* extra hold to back the reference the new dentry now represents */
VN_HOLD(vp);
validate_fields(ip);
d_instantiate(dentry, ip);
}
return -error;
}
示例8: gfs_file_create
/*
 * gfs_file_create(): create a new GFS file
 *
 * size - size of private data structure (v_data)
 * pvp - parent vnode (GFS directory)
 * ops - vnode operations vector
 *
 * In order to use this interface, the parent vnode must have been created by
 * gfs_dir_create(), and the private data stored in v_data must have a
 * 'gfs_file_t' as its first field.
 *
 * Given these constraints, this routine will automatically:
 *
 * - Allocate v_data for the vnode
 * - Initialize necessary fields in the vnode
 * - Hold the parent
 */
vnode_t *
gfs_file_create(size_t size, vnode_t *pvp, vnodeops_t *ops)
{
	gfs_file_t *gfp;
	vnode_t *newvp;

	/* Allocate the vnode and its caller-sized private data. */
	gfp = kmem_zalloc(size, KM_SLEEP);
	newvp = vn_alloc(KM_SLEEP);

	/* Cross-link the vnode with its gfs_file_t header. */
	newvp->v_data = gfp;
	gfp->gfs_vnode = newvp;
	gfp->gfs_parent = pvp;
	gfp->gfs_size = size;
	gfp->gfs_type = GFS_FILE;

	/* Install the ops vector, then hold the parent (if any). */
	vn_setops(newvp, ops);
	if (pvp) {
		VN_SET_VFS_TYPE_DEV(newvp, pvp->v_vfsp, VREG, 0);
		VN_HOLD(pvp);
	}

	return (newvp);
}
示例9: zfsctl_unmount_snap
/*
 * Unmount the filesystem mounted on snapshot entry 'sep'.
 *
 * On success the snapentry and its name are freed and the underlying
 * root vnode is torn down directly (deliberately bypassing VN_RELE --
 * see the comment below).  On failure the entry is left intact and an
 * errno is returned.
 */
static int
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
{
vnode_t *svp = sep->se_root;
int error;
ASSERT(vn_ismntpt(svp));
/* this will be dropped by dounmount() */
if ((error = vn_vfswlock(svp)) != 0)
return (error);
/* hold svp so it survives the unmount; dropped on both paths below */
VN_HOLD(svp);
error = dounmount(vn_mountedvfs(svp), fflags, cr);
if (error) {
VN_RELE(svp);
return (error);
}
/*
 * We can't use VN_RELE(), as that will try to invoke
 * zfsctl_snapdir_inactive(), which would cause us to destroy
 * the sd_lock mutex held by our caller.
 */
ASSERT(svp->v_count == 1);
gfs_vop_inactive(svp, cr, NULL);
kmem_free(sep->se_name, strlen(sep->se_name) + 1);
kmem_free(sep, sizeof (zfs_snapentry_t));
return (0);
}
示例10: smbfs_root
/*
 * VFS root operation for smbfs: hand back the held root vnode of the
 * mount, or an errno if the mount is unreachable from this zone or is
 * dead/unmounted.
 */
static int
smbfs_root(vfs_t *vfsp, vnode_t **vpp)
{
	smbmntinfo_t *smi = VFTOSMI(vfsp);
	vnode_t *rootvp;

	/* Cross-zone access is not permitted. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EPERM);

	if ((smi->smi_flags & SMI_DEAD) || (vfsp->vfs_flag & VFS_UNMOUNTED))
		return (EIO);

	/*
	 * The root vp is created in mount and held
	 * until unmount, so this is paranoia.
	 */
	if (smi->smi_root == NULL)
		return (EIO);

	/* Just take a reference and return it. */
	rootvp = SMBTOV(smi->smi_root);
	VN_HOLD(rootvp);
	*vpp = rootvp;
	return (0);
}
示例11: smb_pathname_lookup
/*
 * Resolve pathname 'pn' starting from directory 'dvp', with 'rootvp'
 * as the root of the lookup.  On success *vp holds the result and, if
 * 'attr' is non-NULL, its attributes are fetched as well.
 *
 * Holds on dvp and rootvp (if not rootdir) are required by lookuppnvp()
 * and will be released within lookuppnvp().
 */
static int
smb_pathname_lookup(pathname_t *pn, pathname_t *rpn, int flags,
vnode_t **vp, vnode_t *rootvp, vnode_t *dvp, smb_attr_t *attr, cred_t *cred)
{
int err;
*vp = NULL;
VN_HOLD(dvp);
/* rootdir never needs an explicit hold */
if (rootvp != rootdir)
VN_HOLD(rootvp);
err = lookuppnvp(pn, rpn, flags, NULL, vp, rootvp, dvp, cred);
/* best-effort attribute fetch; its status is deliberately ignored */
if ((err == 0) && (attr != NULL))
(void) smb_vop_getattr(*vp, NULL, attr, 0, kcred);
return (err);
}
示例12: zfsctl_root
/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 *
 * NOTE(review): unlike the classic illumos VN_HOLD() -- a statement
 * macro with no value -- this variant uses VN_HOLD() as an expression
 * compared against NULL.  Confirm that this port's VN_HOLD() really
 * returns the vnode (or NULL on failure); otherwise this will not
 * compile, or the NULL branch is dead code.
 */
vnode_t *
zfsctl_root(znode_t *zp)
{
ASSERT(zfs_has_ctldir(zp));
if (VN_HOLD(zp->z_zfsvfs->z_ctldir) != NULL)
return (zp->z_zfsvfs->z_ctldir);
else
return NULL;
}
示例13: fdroot
/* ARGSUSED */
/*
 * VFS root operation for fdfs: the root vnode is stashed in vfs_data
 * at mount time; return it with an extra hold for the caller.
 */
static int
fdroot(vfs_t *vfsp, vnode_t **vpp)
{
	vnode_t *rootvp = (vnode_t *)vfsp->vfs_data;

	VN_HOLD(rootvp);
	*vpp = rootvp;
	return (0);
}
示例14: lookuppnatcred
/*
 * Lookup the user file name from a given vp, using a specific credential.
 */
int
lookuppnatcred(
struct pathname *pnp, /* pathname to lookup */
struct pathname *rpnp, /* if non-NULL, return resolved path */
int followlink, /* (don't) follow sym links */
vnode_t **dirvpp, /* ptr for parent vnode */
vnode_t **compvpp, /* ptr for entry vnode */
vnode_t *startvp, /* start search from this vp */
cred_t *cr) /* user credential */
{
vnode_t *vp; /* current directory vp */
vnode_t *rootvp;
proc_t *p = curproc;
if (pnp->pn_pathlen == 0)
return (ENOENT);
mutex_enter(&p->p_lock); /* for u_rdir and u_cdir */
if ((rootvp = PTOU(p)->u_rdir) == NULL)
rootvp = rootdir;
else if (rootvp != rootdir) /* no need to VN_HOLD rootdir */
VN_HOLD(rootvp);
/* absolute paths start at the root; relative ones at startvp or cwd */
if (pnp->pn_path[0] == '/') {
vp = rootvp;
} else {
vp = (startvp == NULL) ? PTOU(p)->u_cdir : startvp;
}
VN_HOLD(vp);
mutex_exit(&p->p_lock);
/*
 * Skip over leading slashes
 */
if (pnp->pn_path[0] == '/') {
do {
pnp->pn_path++;
pnp->pn_pathlen--;
} while (pnp->pn_path[0] == '/');
}
/* the holds on rootvp and vp are released within lookuppnvp() */
return (lookuppnvp(pnp, rpnp, followlink, dirvpp,
compvpp, rootvp, vp, cr));
}
示例15: spec_sync
/*ARGSUSED*/
/*
 * VFS sync routine for specfs: asynchronously push dirty pages of
 * cached block-device vnodes.  Attribute-only syncs (SYNC_ATTR) and
 * calls that overlap an in-progress sync are no-ops.
 */
int
spec_sync(struct vfs *vfsp,
short flag,
struct cred *cr)
{
struct snode *sync_list;
register struct snode **spp, *sp, *spnext;
register struct vnode *vp;
/* only one sync at a time; bail out if another is already running */
if (mutex_tryenter(&spec_syncbusy) == 0)
return (0);
if (flag & SYNC_ATTR) {
mutex_exit(&spec_syncbusy);
return (0);
}
mutex_enter(&stable_lock);
sync_list = NULL;
/*
 * Find all the snodes that are dirty and add them to the sync_list
 */
for (spp = stable; spp < &stable[STABLESIZE]; spp++) {
for (sp = *spp; sp != NULL; sp = sp->s_next) {
vp = STOV(sp);
/*
 * Don't bother sync'ing a vp if it's
 * part of a virtual swap device.
 */
if (IS_SWAPVP(vp))
continue;
if (vp->v_type == VBLK && vn_has_cached_data(vp)) {
/*
 * Prevent vp from going away before we
 * get a chance to do a VOP_PUTPAGE
 * via sync_list processing
 */
VN_HOLD(vp);
sp->s_list = sync_list;
sync_list = sp;
}
}
}
mutex_exit(&stable_lock);
/*
 * Now write out all the snodes we marked asynchronously.
 */
for (sp = sync_list; sp != NULL; sp = spnext) {
spnext = sp->s_list;
vp = STOV(sp);
(void) VOP_PUTPAGE(vp, (offset_t)0, (uint_t)0, B_ASYNC, cr);
VN_RELE(vp); /* Release our hold on vnode */
}
mutex_exit(&spec_syncbusy);
return (0);
}
}