本文整理汇总了C++中SA_ZPL_SIZE函数的典型用法代码示例。如果您正苦于以下问题:C++ SA_ZPL_SIZE函数的具体用法?C++ SA_ZPL_SIZE怎么用?C++ SA_ZPL_SIZE使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了SA_ZPL_SIZE函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: dirattrpack
/*
 * Pack the requested ATTR_DIR_* attribute values for znode 'zp' into the
 * caller-supplied attribute buffer and advance *ai_attrbufpp past the
 * values written.  Each attribute occupies one u_int32_t slot.
 */
void dirattrpack(attrinfo_t *aip, znode_t *zp)
{
	attrgroup_t requested = aip->ai_attrlist->dirattr;
	u_int32_t *cursor = *aip->ai_attrbufpp;

	if (requested & ATTR_DIR_LINKCOUNT) {
		/* Directory hard links are not supported. */
		*cursor++ = 1;
	}
	if (requested & ATTR_DIR_ENTRYCOUNT) {
		uint64_t entries;

		/* For a directory, the ZPL size SA is the entry count. */
		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
		    &entries, sizeof(entries)) == 0);
		*cursor++ = (uint32_t)entries;
	}
	if ((requested & ATTR_DIR_MOUNTSTATUS) && zp) {
		vnode_t *vp = ZTOV(zp);

		/* Report whether something is mounted on this directory. */
		if (vp != NULL && vnode_mountedhere(vp) != NULL)
			*cursor++ = DIR_MNTSTATUS_MNTPOINT;
		else
			*cursor++ = 0;
	}
	*aip->ai_attrbufpp = cursor;
}
示例2: __osd_attr_init
/*
 * __osd_attr_init - create the full initial set of ZPL system attributes
 * for a freshly created object, taking the values from 'la' and the
 * given parent object id.
 *
 * Returns 0 on success or a negative errno from
 * sa_replace_all_by_template().
 */
int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
		    sa_handle_t *sa_hdl, dmu_tx_t *tx,
		    struct lu_attr *la, uint64_t parent)
{
	sa_bulk_attr_t *attrs = osd_oti_get(env)->oti_attr_bulk;
	struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
	timestruc_t birth;
	uint64_t birthtime[2];
	uint64_t generation;
	int idx;
	int rc;

	LASSERT(sa_hdl);

	/* Use the txg as the object generation; stamp creation time now. */
	generation = dmu_tx_get_txg(tx);
	gethrestime(&birth);
	ZFS_TIME_ENCODE(&birth, birthtime);

	/* Stage the incoming attributes in the per-thread scratch area. */
	osa->atime[0] = la->la_atime;
	osa->ctime[0] = la->la_ctime;
	osa->mtime[0] = la->la_mtime;
	osa->mode = la->la_mode;
	osa->uid = la->la_uid;
	osa->gid = la->la_gid;
	osa->rdev = la->la_rdev;
	osa->nlink = la->la_nlink;
	osa->flags = attrs_fs2zfs(la->la_flags);
	osa->size = la->la_size;

	/*
	 * We need to create all SAs below upon object create.
	 *
	 * XXX The attribute order matters since the accounting callback relies
	 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
	 * look up the UID/GID attributes. Moreover, the callback does not seem
	 * to support the spill block.
	 * We define attributes in the same order as SA_*_OFFSET in order to
	 * work around the problem. See ORI-610.
	 */
	idx = 0;
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_GEN(osd), NULL, &generation, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_PARENT(osd), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_CRTIME(osd), NULL, birthtime, 16);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(attrs, idx, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
	LASSERT(idx <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));

	rc = -sa_replace_all_by_template(sa_hdl, attrs, idx, tx);
	return rc;
}
示例3: __osd_xattr_get_large
/*
 * Read the value of a "large" extended attribute, i.e. one whose value is
 * kept in its own DMU object referenced from the xattr ZAP 'xattr'.
 *
 * On success *sizep holds the value length; the value itself is copied
 * into 'buf' unless buf/buf->lb_buf is NULL (size-only probe).
 * Returns 0 or a negative errno (-ENOENT if no xattr directory exists,
 * -ERANGE if the caller's buffer is too small, -EOVERFLOW if the value
 * does not fit in an int).
 */
int __osd_xattr_get_large(const struct lu_env *env, struct osd_device *osd,
			  uint64_t xattr, struct lu_buf *buf,
			  const char *name, int *sizep)
{
	dmu_buf_t *value_db;
	sa_handle_t *hdl = NULL;
	uint64_t value_obj;
	uint64_t value_len;
	int rc;

	/* No xattr ZAP at all means no extended attributes exist. */
	if (xattr == ZFS_NO_OBJECT)
		return -ENOENT;

	/* Resolve the attribute name to the object holding its value. */
	rc = -zap_lookup(osd->od_os, xattr, name, sizeof(uint64_t), 1,
			 &value_obj);
	if (rc)
		return rc;

	rc = __osd_obj2dbuf(env, osd->od_os, value_obj, &value_db);
	if (rc)
		return rc;

	rc = -sa_handle_get(osd->od_os, value_obj, NULL, SA_HDL_PRIVATE,
			    &hdl);
	if (rc)
		goto out_dbuf;

	/* The value length is the size of the backing object. */
	rc = -sa_lookup(hdl, SA_ZPL_SIZE(osd), &value_len, 8);
	if (rc)
		goto out_sa;
	if (value_len > INT_MAX) {
		rc = -EOVERFLOW;
		goto out_sa;
	}
	*sizep = (int)value_len;

	/* Size-only probe: caller did not supply a destination buffer. */
	if (buf == NULL || buf->lb_buf == NULL)
		goto out_sa;

	if (*sizep > buf->lb_len) {
		rc = -ERANGE; /* match ldiskfs error */
		goto out_sa;
	}

	rc = -dmu_read(osd->od_os, value_db->db_object, 0,
		       value_len, buf->lb_buf, DMU_READ_PREFETCH);
out_sa:
	sa_handle_destroy(hdl);
out_dbuf:
	dmu_buf_rele(value_db, FTAG);
	return rc;
}
示例4: zfs_replay_write2
/*
 * TX_WRITE2 are only generated when dmu_sync() returns EALREADY
 * meaning the pool block is already being synced. So now that we always write
 * out full blocks, all we have to do is expand the eof if
 * the file is grown.
 */
static int
zfs_replay_write2(zfsvfs_t *zfsvfs, void *data, boolean_t byteswap)
{
#ifndef TODO_OSV
	kprintf("TX_WRITE2\n");
	return EOPNOTSUPP;
#else
	lr_write_t *lr = data;
	znode_t *zp;
	int error;
	uint64_t end;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);
top:
	end = lr->lr_offset + lr->lr_length;
	if (end > zp->z_size) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		zp->z_size = end;
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			/*
			 * Fix: keep our znode reference across the ERESTART
			 * retry.  The original code called VN_RELE() before
			 * 'goto top' and then dereferenced zp again at 'top'
			 * (use-after-release).  Release only when actually
			 * returning the error.
			 */
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			dmu_tx_abort(tx);
			VN_RELE(ZTOV(zp));
			return (error);
		}
		/* Persist the new EOF in the SA size attribute. */
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
		    (void *)&zp->z_size, sizeof (uint64_t), tx);
		/* Ensure the replayed seq is updated */
		(void) zil_replaying(zfsvfs->z_log, tx);
		dmu_tx_commit(tx);
	}
	VN_RELE(ZTOV(zp));
	return (error);
#endif
}
示例5: zfs_replay_write2
/*
 * TX_WRITE2 are only generated when dmu_sync() returns EALREADY
 * meaning the pool block is already being synced. So now that we always write
 * out full blocks, all we have to do is expand the eof if
 * the file is grown.
 */
static int
zfs_replay_write2(void *arg1, void *arg2, boolean_t byteswap)
{
	zfsvfs_t *zfsvfs = arg1;
	lr_write_t *lr = arg2;
	znode_t *zp;
	int error;
	uint64_t end;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);
top:
	end = lr->lr_offset + lr->lr_length;
	if (end > zp->z_size) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		zp->z_size = end;
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			/*
			 * Fix: keep our inode reference across the ERESTART
			 * retry.  The original code called iput() before
			 * 'goto top' and then dereferenced zp again at 'top'
			 * (use-after-free on the retry path).  Drop the
			 * reference only when actually returning the error.
			 */
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			dmu_tx_abort(tx);
			iput(ZTOI(zp));
			return (error);
		}
		/* Persist the new EOF in the SA size attribute. */
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
		    (void *)&zp->z_size, sizeof (uint64_t), tx);
		/* Ensure the replayed seq is updated */
		(void) zil_replaying(zfsvfs->z_log, tx);
		dmu_tx_commit(tx);
	}
	iput(ZTOI(zp));
	return (error);
}
示例6: osd_write
/*
 * osd_write - copy buf->lb_len bytes from buf->lb_buf into the DMU object
 * backing 'dt' at offset *pos, inside the caller-provided (already started)
 * transaction 'th'.
 *
 * On success returns the number of bytes written (buf->lb_len) and advances
 * *pos; on failure to update the size attribute returns a negative errno.
 * If the write extends past the cached EOF, both the cached la_size and the
 * on-disk ZPL size SA are grown.  'ignore_quota' is unused here.
 */
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
const struct lu_buf *buf, loff_t *pos,
struct thandle *th, int ignore_quota)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t offset = *pos;
int rc;
ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_db);
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
record_start_io(osd, WRITE, 0);
/* Copy the data into the object under the caller's transaction. */
dmu_write(osd->od_os, obj->oo_db->db_object, offset,
(uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
/* Grow the cached size if this write extends the file; the lock is
 * dropped before the (potentially blocking) SA update on both paths. */
write_lock(&obj->oo_attr_lock);
if (obj->oo_attr.la_size < offset + buf->lb_len) {
obj->oo_attr.la_size = offset + buf->lb_len;
write_unlock(&obj->oo_attr_lock);
/* osd_object_sa_update() will be copying directly from oo_attr
* into dbuf. any update within a single txg will copy the
* most actual */
rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd),
&obj->oo_attr.la_size, 8, oh);
if (unlikely(rc))
GOTO(out, rc);
} else {
write_unlock(&obj->oo_attr_lock);
}
*pos += buf->lb_len;
rc = buf->lb_len;
out:
/* I/O accounting runs on both the success and the error path. */
record_end_io(osd, WRITE, 0, buf->lb_len,
buf->lb_len >> PAGE_CACHE_SHIFT);
RETURN(rc);
}
示例7: __osd_object_attr_get
/*
 * Retrieve the attributes of a DMU object backing 'obj' and translate them
 * into the Lustre lu_attr 'la'.
 *
 * A private SA handle is created for the duration of the call and destroyed
 * before returning.  The common attributes are fetched with one bulk lookup;
 * LA_RDEV is read separately and only for device nodes.  For directories an
 * extra LMA xattr lookup extracts orphan-related flags into oo_lma_flags.
 * Returns 0 or a negative errno.
 */
int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
struct osd_object *obj, struct lu_attr *la)
{
struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
sa_handle_t *sa_hdl;
int cnt = 0;
int rc;
ENTRY;
LASSERT(obj->oo_db != NULL);
rc = -sa_handle_get(o->od_os, obj->oo_db->db_object, NULL,
SA_HDL_PRIVATE, &sa_hdl);
if (rc)
RETURN(rc);
/* Everything queued in the bulk lookup below will be valid on success. */
la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
/* Build the bulk descriptor; a single sa_bulk_lookup() reads them all. */
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
rc = -sa_bulk_lookup(sa_hdl, bulk, cnt);
if (rc)
GOTO(out_sa, rc);
/* Copy the scratch values into the caller's lu_attr (timestamps use
 * only the seconds word here). */
la->la_atime = osa->atime[0];
la->la_mtime = osa->mtime[0];
la->la_ctime = osa->ctime[0];
la->la_mode = osa->mode;
la->la_uid = osa->uid;
la->la_gid = osa->gid;
la->la_nlink = osa->nlink;
la->la_flags = attrs_zfs2fs(osa->flags);
la->la_size = osa->size;
/* Try to get extra flag from LMA. Right now, only LMAI_ORPHAN
 * flags is stored in LMA, and it is only for orphan directory */
if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
struct osd_thread_info *info = osd_oti_get(env);
struct lustre_mdt_attrs *lma;
struct lu_buf buf;
lma = (struct lustre_mdt_attrs *)info->oti_buf;
buf.lb_buf = lma;
buf.lb_len = sizeof(info->oti_buf);
rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
if (rc > 0) {
rc = 0;
lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
obj->oo_lma_flags =
lma_to_lustre_flags(lma->lma_incompat);
} else if (rc == -ENODATA) {
/* Missing LMA is not an error for this purpose. */
rc = 0;
}
}
/* Device nodes additionally carry an rdev attribute. */
if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
rc = -sa_lookup(sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
if (rc)
GOTO(out_sa, rc);
la->la_rdev = osa->rdev;
la->la_valid |= LA_RDEV;
}
out_sa:
sa_handle_destroy(sa_hdl);
RETURN(rc);
}
示例8: zfs_sa_upgrade
void
zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
dmu_buf_t *db = sa_get_db(hdl);
znode_t *zp = sa_get_userdata(hdl);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
sa_bulk_attr_t bulk[20];
int count = 0;
sa_bulk_attr_t sa_attrs[20] = { { 0 } };
zfs_acl_locator_cb_t locate = { 0 };
uint64_t uid, gid, mode, rdev, xattr, parent;
uint64_t crtime[2], mtime[2], ctime[2];
zfs_acl_phys_t znode_acl;
char scanstamp[AV_SCANSTAMP_SZ];
boolean_t drop_lock = B_FALSE;
/*
* No upgrade if ACL isn't cached
* since we won't know which locks are held
* and ready the ACL would require special "locked"
* interfaces that would be messy
*/
if (zp->z_acl_cached == NULL || vnode_islnk(ZTOV(zp)))
return;
/*
* If the z_lock is held and we aren't the owner
* the just return since we don't want to deadlock
* trying to update the status of z_is_sa. This
* file can then be upgraded at a later time.
*
* Otherwise, we know we are doing the
* sa_update() that caused us to enter this function.
*/
if (mutex_owner(&zp->z_lock) != curthread) {
if (mutex_tryenter(&zp->z_lock) == 0)
return;
else
drop_lock = B_TRUE;
}
/* First do a bulk query of the attributes that aren't cached */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&znode_acl, 88);
if (sa_bulk_lookup_locked(hdl, bulk, count) != 0)
goto done;
/*
* While the order here doesn't matter its best to try and organize
* it is such a way to pick up an already existing layout number
*/
count = 0;
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs),
NULL, &zp->z_gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL,
zp->z_atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
&crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
&zp->z_links, 8);
if (vnode_isblk(zp->z_vnode) || vnode_islnk(zp->z_vnode))
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&zp->z_acl_cached->z_acl_count, 8);
if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
zfs_acl_xform(zp, zp->z_acl_cached, CRED());
locate.cb_aclp = zp->z_acl_cached;
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);
if (xattr)
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
NULL, &xattr, 8);
//.........这里部分代码省略.........
示例9: osd_attr_set
//.........这里部分代码省略.........
/* do both accounting updates outside oo_attr_lock below */
if ((valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
/* Update user accounting. Failure isn't fatal, but we still
* log an error message */
rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
la->la_uid, 1, oh->ot_tx);
if (rc)
CERROR("%s: failed to update accounting ZAP for user "
"%d (%d)\n", osd->od_svname, la->la_uid, rc);
rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
obj->oo_attr.la_uid, -1, oh->ot_tx);
if (rc)
CERROR("%s: failed to update accounting ZAP for user "
"%d (%d)\n", osd->od_svname,
obj->oo_attr.la_uid, rc);
}
if ((valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
/* Update group accounting. Failure isn't fatal, but we still
* log an error message */
rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
la->la_gid, 1, oh->ot_tx);
if (rc)
CERROR("%s: failed to update accounting ZAP for user "
"%d (%d)\n", osd->od_svname, la->la_gid, rc);
rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
obj->oo_attr.la_gid, -1, oh->ot_tx);
if (rc)
CERROR("%s: failed to update accounting ZAP for user "
"%d (%d)\n", osd->od_svname,
obj->oo_attr.la_gid, rc);
}
write_lock(&obj->oo_attr_lock);
cnt = 0;
if (valid & LA_ATIME) {
osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
osa->atime, 16);
}
if (valid & LA_MTIME) {
osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
osa->mtime, 16);
}
if (valid & LA_CTIME) {
osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
osa->ctime, 16);
}
if (valid & LA_MODE) {
/* mode is stored along with type, so read it first */
obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
(la->la_mode & ~S_IFMT);
osa->mode = obj->oo_attr.la_mode;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
&osa->mode, 8);
}
if (valid & LA_SIZE) {
osa->size = obj->oo_attr.la_size = la->la_size;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
&osa->size, 8);
}
if (valid & LA_NLINK) {
osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
&osa->nlink, 8);
}
if (valid & LA_RDEV) {
osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
&osa->rdev, 8);
}
if (valid & LA_FLAGS) {
osa->flags = attrs_fs2zfs(la->la_flags);
/* many flags are not supported by zfs, so ensure a good cached
* copy */
obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
&osa->flags, 8);
}
if (valid & LA_UID) {
osa->uid = obj->oo_attr.la_uid = la->la_uid;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
&osa->uid, 8);
}
if (valid & LA_GID) {
osa->gid = obj->oo_attr.la_gid = la->la_gid;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
&osa->gid, 8);
}
obj->oo_attr.la_valid |= valid;
write_unlock(&obj->oo_attr_lock);
LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
out:
up_read(&obj->oo_guard);
RETURN(rc);
}
示例10: zfs_link_create
/*
 * Link zp into dl. Can only fail if zp has been unlinked.
 *
 * Updates, under the respective z_lock mutexes and in the caller's
 * transaction 'tx': the child's link count / parent / flags / ctime,
 * then the parent directory's size, link count and timestamps, and
 * finally adds the directory entry to the parent's ZAP.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
znode_t *dzp = dl->dl_dzp;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t value;
int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
uint64_t links;
int count = 0;
int error;
/* Phase 1: update the child znode under its own lock. */
mutex_enter(&zp->z_lock);
if (!(flag & ZRENAMING)) {
if (zp->z_unlinked) { /* no new links to unlinked zp */
ASSERT(!(flag & (ZNEW | ZEXISTS)));
mutex_exit(&zp->z_lock);
return (SET_ERROR(ENOENT));
}
if (!(flag & ZNEW)) {
/*
* ZNEW nodes come from zfs_mknode() where the link
* count has already been initialised
*/
inc_nlink(ZTOI(zp));
links = ZTOI(zp)->i_nlink;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
}
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&dzp->z_id, sizeof (dzp->z_id));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (!(flag & ZNEW)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime);
}
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
mutex_exit(&zp->z_lock);
/* Phase 2: update the parent directory under its own lock. */
mutex_enter(&dzp->z_lock);
dzp->z_size++;
/* A new subdirectory's ".." adds a link to the parent. */
if (zp_is_dir)
inc_nlink(ZTOI(dzp));
links = ZTOI(dzp)->i_nlink;
count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&dzp->z_size, sizeof (dzp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
mutex_exit(&dzp->z_lock);
/* Phase 3: insert the directory entry into the parent's ZAP. */
value = zfs_dirent(zp, zp->z_mode);
error = zap_add(ZTOZSB(zp)->z_os, dzp->z_id, dl->dl_name,
8, 1, &value, tx);
ASSERT(error == 0);
return (0);
}
示例11: zfs_link_destroy
/*
* Unlink zp from dl, and mark zp for deletion if this was the last link. Can
* fail if zp is a mount point (EBUSY) or a non-empty directory (ENOTEMPTY).
* If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
* If it's non-NULL, we use it to indicate whether the znode needs deletion,
* and it's the caller's job to do it.
*/
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
boolean_t *unlinkedp)
{
znode_t *dzp = dl->dl_dzp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
boolean_t unlinked = B_FALSE;
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
uint64_t links;
int count = 0;
int error;
#ifdef HAVE_DNLC
dnlc_remove(ZTOI(dzp), dl->dl_name);
#endif /* HAVE_DNLC */
if (!(flag & ZRENAMING)) {
mutex_enter(&zp->z_lock);
if (zp_is_dir && !zfs_dirempty(zp)) {
mutex_exit(&zp->z_lock);
return (SET_ERROR(ENOTEMPTY));
}
/*
* If we get here, we are going to try to remove the object.
* First try removing the name from the directory; if that
* fails, return the error.
*/
error = zfs_dropname(dl, zp, dzp, tx, flag);
if (error != 0) {
mutex_exit(&zp->z_lock);
return (error);
}
if (ZTOI(zp)->i_nlink <= zp_is_dir) {
zfs_panic_recover("zfs: link count on %lu is %u, "
"should be at least %u", zp->z_id,
(int)ZTOI(zp)->i_nlink, zp_is_dir + 1);
set_nlink(ZTOI(zp), zp_is_dir + 1);
}
drop_nlink(ZTOI(zp));
if (ZTOI(zp)->i_nlink == zp_is_dir) {
zp->z_unlinked = B_TRUE;
clear_nlink(ZTOI(zp));
unlinked = B_TRUE;
} else {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, sizeof (zp->z_pflags));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime);
}
links = ZTOI(zp)->i_nlink;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
count = 0;
ASSERT(error == 0);
mutex_exit(&zp->z_lock);
} else {
error = zfs_dropname(dl, zp, dzp, tx, flag);
if (error != 0)
return (error);
}
mutex_enter(&dzp->z_lock);
dzp->z_size--; /* one dirent removed */
if (zp_is_dir)
drop_nlink(ZTOI(dzp)); /* ".." link from zp */
links = ZTOI(dzp)->i_nlink;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &dzp->z_size, sizeof (dzp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
NULL, ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
NULL, mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
mutex_exit(&dzp->z_lock);
if (unlinkedp != NULL)
*unlinkedp = unlinked;
else if (unlinked)
zfs_unlinked_add(zp, tx);
//.........这里部分代码省略.........
示例12: zfs_link_destroy
/*
* Unlink zp from dl, and mark zp for deletion if this was the last link.
* Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST).
* If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
* If it's non-NULL, we use it to indicate whether the znode needs deletion,
* and it's the caller's job to do it.
*/
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
boolean_t *unlinkedp)
{
znode_t *dzp = dl->dl_dzp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
vnode_t *vp = ZTOV(zp);
int zp_is_dir = (vp->v_type == VDIR);
boolean_t unlinked = B_FALSE;
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
int count = 0;
int error;
dnlc_remove(ZTOV(dzp), dl->dl_name);
if (!(flag & ZRENAMING)) {
#ifdef HAVE_ZPL
if (vn_vfswlock(vp)) /* prevent new mounts on zp */
return (EBUSY);
if (vn_ismntpt(vp)) { /* don't remove mount point */
vn_vfsunlock(vp);
return (EBUSY);
}
#endif
mutex_enter(&zp->z_lock);
if (zp_is_dir && !zfs_dirempty(zp)) {
mutex_exit(&zp->z_lock);
#ifdef HAVE_ZPL
vn_vfsunlock(vp);
#endif
return (EEXIST);
}
/*
* If we get here, we are going to try to remove the object.
* First try removing the name from the directory; if that
* fails, return the error.
*/
error = zfs_dropname(dl, zp, dzp, tx, flag);
if (error != 0) {
mutex_exit(&zp->z_lock);
#ifdef HAVE_ZPL
vn_vfsunlock(vp);
#endif
return (error);
}
if (zp->z_links <= zp_is_dir) {
#ifdef HAVE_ZPL
zfs_panic_recover("zfs: link count on %s is %u, "
"should be at least %u",
zp->z_vnode->v_path ? zp->z_vnode->v_path :
"<unknown>", (int)zp->z_links,
zp_is_dir + 1);
#endif
zp->z_links = zp_is_dir + 1;
}
if (--zp->z_links == zp_is_dir) {
zp->z_unlinked = B_TRUE;
zp->z_links = 0;
unlinked = B_TRUE;
} else {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, sizeof (zp->z_pflags));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
B_TRUE);
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &zp->z_links, sizeof (zp->z_links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
count = 0;
ASSERT(error == 0);
mutex_exit(&zp->z_lock);
#ifdef HAVE_ZPL
vn_vfsunlock(vp);
#endif
} else {
error = zfs_dropname(dl, zp, dzp, tx, flag);
if (error != 0)
return (error);
}
mutex_enter(&dzp->z_lock);
dzp->z_size--; /* one dirent removed */
dzp->z_links -= zp_is_dir; /* ".." link from zp */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &dzp->z_links, sizeof (dzp->z_links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
//.........这里部分代码省略.........
示例13: zfs_link_create
/*
 * Link zp into dl. Can only fail if zp has been unlinked.
 *
 * OS X / generic variant: updates the child's link count, parent id,
 * flags and ctime under zp->z_lock, then the parent directory's size,
 * link count and timestamps under dzp->z_lock, and finally inserts the
 * directory entry into the parent's ZAP, all within transaction 'tx'.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
znode_t *dzp = dl->dl_dzp;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
#ifdef __APPLE__
uint64_t value;
/* OSX - don't access the vnode here since it might not be attached yet. */
//int zp_is_dir = S_ISDIR(zp->z_phys->zp_mode);
int zp_is_dir = S_ISDIR(zp->z_mode);
#else
vnode_t *vp = ZTOV(zp);
uint64_t value;
int zp_is_dir = (vp->v_type == VDIR);
#endif
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
int count = 0;
int error;
/* Phase 1: update the child znode under its own lock. */
mutex_enter(&zp->z_lock);
if (!(flag & ZRENAMING)) {
if (zp->z_unlinked) { /* no new links to unlinked zp */
ASSERT(!(flag & (ZNEW | ZEXISTS)));
mutex_exit(&zp->z_lock);
return (ENOENT);
}
zp->z_links++;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&zp->z_links, sizeof (zp->z_links));
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&dzp->z_id, sizeof (dzp->z_id));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (!(flag & ZNEW)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime, B_TRUE);
}
/* NOTE(review): this return value is not checked here, unlike the
 * ASSERT on the parent update below -- possibly intentional. */
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
// Needed?
#ifdef __APPLE__
zp->z_parent = dzp->z_id;
#endif
mutex_exit(&zp->z_lock);
/* Phase 2: update the parent directory under its own lock. */
mutex_enter(&dzp->z_lock);
dzp->z_size++;
/* A new subdirectory's ".." adds a link to the parent. */
dzp->z_links += zp_is_dir;
count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&dzp->z_size, sizeof (dzp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&dzp->z_links, sizeof (dzp->z_links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
mutex_exit(&dzp->z_lock);
/* Phase 3: insert the directory entry into the parent's ZAP. */
value = zfs_dirent(zp, zp->z_mode);
error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
8, 1, &value, tx);
ASSERT(error == 0);
#ifndef __APPLE__
/* On Mac OS X, this is done up in VFS layer. */
dnlc_update(ZTOV(dzp), dl->dl_name, vp);
#endif
return (0);
}
示例14: zfs_getattr_znode_locked
/* For part 1 of zfs_getattr() */
/*
 * Fill 'vap' with the attributes of znode 'zp', read attribute-by-attribute
 * from its SA handle.  OS X specific: the root directory always reports
 * fileid 2 (with parentid 1), and direct children of the root report
 * parentid 2.  Returns 0 on success or an error from zfs_getacl().
 *
 * Fix: removed the stray debug printf() that logged the blocksize to the
 * console on every getattr call.
 */
int
zfs_getattr_znode_locked(vattr_t *vap, znode_t *zp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	uint64_t times[2];
	uint64_t val;

	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_mode = val & MODEMASK;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_UID(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_uid = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_GID(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_gid = val;
	//vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
	/* On OS X, the root directory id is always 2 */
	vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_id;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_nlink = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_data_size = val;
	vap->va_total_size = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_rdev = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_gen = val;

	/* Timestamps: failures are tolerated (no VERIFY). */
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_create_time, times);
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_access_time, times);
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_modify_time, times);
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_change_time, times);

	if (VATTR_IS_ACTIVE(vap, va_backup_time)) {
		/* Backup time is not stored; report epoch. */
		vap->va_backup_time.tv_sec = 0;
		vap->va_backup_time.tv_nsec = 0;
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	vap->va_flags = zfs_getbsdflags(zp);
	/* On OS X, the root directory id is always 2 and its parent is 1 */
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &val, sizeof (val)) == 0);
	if (zp->z_id == zfsvfs->z_root)
		vap->va_parentid = 1;
	else if (val == zfsvfs->z_root)
		vap->va_parentid = 2;
	else
		vap->va_parentid = val;

	vap->va_iosize = zp->z_blksz ? zp->z_blksz : zfsvfs->z_max_blksz;
	VATTR_SET_SUPPORTED(vap, va_iosize);
	vap->va_supported |= ZFS_SUPPORTED_VATTRS;

	if (VATTR_IS_ACTIVE(vap, va_nchildren) && vnode_isdir(ZTOV(zp)))
		VATTR_RETURN(vap, va_nchildren, vap->va_nlink - 2);

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		/*
		 * A failing ZNODE_ACL lookup (the 16-byte 'times' buffer is
		 * deliberately too small for the full 88-byte legacy ACL
		 * attribute) is used as "no ACL present".
		 * NOTE(review): va_acl is set to KAUTH_FILESEC_NONE without a
		 * matching VATTR_SET_SUPPORTED in this branch -- confirm this
		 * asymmetry is intentional.
		 */
		if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
		    times, sizeof (times)))) {
			// if (zp->z_phys->zp_acl.z_acl_count == 0) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
		} else {
			error = zfs_getacl(zp, &vap->va_acl, B_TRUE, cr);
			if (error)
				return (error);
			VATTR_SET_SUPPORTED(vap, va_acl);
			/*
			 * va_acl implies that va_uuuid and va_guuid are
			 * also supported.
			 */
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		}
	}
	return (0);
}
示例15: zfs_znode_alloc
/*
* Construct a znode+inode and initialize.
*
* This does not do a call to dmu_set_user() that is
* up to the caller to do, in case you don't want to
* return the znode
*/
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
struct inode *dip)
{
znode_t *zp;
struct inode *ip;
uint64_t mode;
uint64_t parent;
sa_bulk_attr_t bulk[9];
int count = 0;
ASSERT(zsb != NULL);
ip = new_inode(zsb->z_sb);
if (ip == NULL)
return (NULL);
zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
ASSERT3P(zp->z_xattr_parent, ==, NULL);
zp->z_moved = 0;
zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
zp->z_atime_dirty = 0;
zp->z_mapcnt = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
zp->z_is_zvol = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_FALSE;
zp->z_is_stale = B_FALSE;
zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
goto error;
}
zp->z_mode = mode;
/*
* xattr znodes hold a reference on their unique parent
*/
if (dip && zp->z_pflags & ZFS_XATTR) {
igrab(dip);
zp->z_xattr_parent = ITOZ(dip);
}
ip->i_ino = obj;
zfs_inode_update(zp);
zfs_inode_set_ops(zsb, ip);
/*
* The only way insert_inode_locked() can fail is if the ip->i_ino
* number is already hashed for this super block. This can never
* happen because the inode numbers map 1:1 with the object numbers.
*
* The one exception is rolling back a mounted file system, but in
* this case all the active inode are unhashed during the rollback.
*/
VERIFY3S(insert_inode_locked(ip), ==, 0);
mutex_enter(&zsb->z_znodes_lock);
list_insert_tail(&zsb->z_all_znodes, zp);
zsb->z_nr_znodes++;
membar_producer();
mutex_exit(&zsb->z_znodes_lock);
unlock_new_inode(ip);
return (zp);
error:
//.........这里部分代码省略.........