This page collects typical usage examples of the C++ function dmu_objset_spa(), which returns the storage pool (spa_t *) backing a given objset (objset_t *). If you are wondering what dmu_objset_spa() does or how to call it, the hand-picked examples below should help.
The 15 code examples that follow are drawn from open-source ZFS and ZFS-based projects and are sorted by popularity by default. Together they cover the usual reasons for fetching the pool from an objset: querying the pool version, feature flags, writability, and block size limits.
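A quick orientation before the examples: dmu_objset_spa() is a trivial accessor that maps an objset to the pool containing it, and callers almost always feed the result straight into an spa_*() query. The helper below is a minimal sketch rather than code from any of the examples; the name my_objset_is_writable() is hypothetical, but the check itself mirrors what examples 10 and 13 do before clearing ZVOL_RDONLY.

/*
 * Hypothetical helper: may this objset be written to?
 * dmu_objset_spa() returns the spa_t of the pool that contains os.
 */
static boolean_t
my_objset_is_writable(objset_t *os)
{
	return (spa_writeable(dmu_objset_spa(os)) &&
	    !dmu_objset_is_snapshot(os));
}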
Example 1: zfs_set_version
int
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
int error;
objset_t *os = zsb->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zsb->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zsb->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY(0 == sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
	}

	dmu_tx_commit(tx);
	zsb->z_version = newvers;

	return (0);
}
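Worth noting in example 1 is the DMU transaction discipline: dmu_tx_abort() is only legal while the transaction is unassigned, and once dmu_tx_assign() succeeds the transaction must be committed even if a later step fails, which is why the zap_update() error path above calls dmu_tx_commit() rather than dmu_tx_abort(). A condensed sketch of the pattern, where do_update() is a hypothetical placeholder:

tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
	dmu_tx_abort(tx);	/* not yet assigned: abort is required */
	return (error);
}
error = do_update(os, tx);	/* hypothetical work done under the tx */
dmu_tx_commit(tx);		/* assigned: commit even if do_update() failed */
return (error);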
Example 2: bpobj_decr_empty
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_objset_pool(os);
spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
if (!spa_feature_is_active(dmu_objset_spa(os),
SPA_FEATURE_EMPTY_BPOBJ)) {
VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, tx));
VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
dp->dp_empty_bpobj = 0;
	}
}
Example 3: bpobj_alloc_empty
/*
* Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
*/
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
zfeature_info_t *empty_bpobj_feat =
&spa_feature_table[SPA_FEATURE_EMPTY_BPOBJ];
spa_t *spa = dmu_objset_spa(os);
dsl_pool_t *dp = dmu_objset_pool(os);
if (spa_feature_is_enabled(spa, empty_bpobj_feat)) {
if (!spa_feature_is_active(spa, empty_bpobj_feat)) {
ASSERT0(dp->dp_empty_bpobj);
dp->dp_empty_bpobj =
bpobj_alloc(os, SPA_MAXBLOCKSIZE, tx);
VERIFY(zap_add(os,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj, tx) == 0);
}
spa_feature_incr(spa, empty_bpobj_feat, tx);
ASSERT(dp->dp_empty_bpobj != 0);
return (dp->dp_empty_bpobj);
} else {
return (bpobj_alloc(os, blocksize, tx));
}
}
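Examples 2 and 3 are the two halves of one pattern: a pool feature's reference count tracks how many on-disk consumers exist, the shared object is created on the first increment, and it is destroyed once the count drops back to zero. Below is a condensed sketch, assuming the same older API as above in which the spa_feature_*() calls take a zfeature_info_t pointer (newer code passes a spa_feature_t enum instead); create_shared_object() and destroy_shared_object() are hypothetical placeholders.

/* First consumer: activate the feature and materialize shared state. */
if (spa_feature_is_enabled(spa, feat)) {
	if (!spa_feature_is_active(spa, feat))
		create_shared_object(os, tx);
	spa_feature_incr(spa, feat, tx);
}

/* Last consumer: when the refcount drops to zero, tear the state down. */
spa_feature_decr(spa, feat, tx);
if (!spa_feature_is_active(spa, feat))
	destroy_shared_object(os, tx);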
Example 4: osd_attr_get
static int osd_attr_get(const struct lu_env *env,
struct dt_object *dt,
struct lu_attr *attr)
{
struct osd_object *obj = osd_dt_obj(dt);
uint64_t blocks;
uint32_t blksize;
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
LASSERT(obj->oo_db);
read_lock(&obj->oo_attr_lock);
*attr = obj->oo_attr;
read_unlock(&obj->oo_attr_lock);
	/* With ZFS_DEBUG, zrl_add_debug(), called by DB_DNODE_ENTER()
	 * from within sa_object_size(), can block on a mutex, so we
	 * cannot call sa_object_size() while holding the rwlock. */
sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
	/* We do not control the size of indices, so always calculate
	 * it from the number of blocks reported by the DMU. */
if (S_ISDIR(attr->la_mode))
attr->la_size = 512 * blocks;
	/* The block size may not be set; suggest maximal I/O transfers. */
if (blksize == 0)
blksize = osd_spa_maxblocksize(
dmu_objset_spa(osd_obj2dev(obj)->od_os));
attr->la_blksize = blksize;
attr->la_blocks = blocks;
attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
return 0;
}
Example 5: bplist_space_birthrange
/*
* Return (in *dsizep) the amount of space on the deadlist which is:
* mintxg < blk_birth <= maxtxg
*/
int
bplist_space_birthrange(bplist_t *bpl, uint64_t mintxg, uint64_t maxtxg,
uint64_t *dsizep)
{
uint64_t size = 0;
uint64_t itor = 0;
blkptr_t bp;
int err;
/*
* As an optimization, if they want the whole txg range, just
* get bpl_bytes rather than iterating over the bps.
*/
if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX) {
mutex_enter(&bpl->bpl_lock);
err = bplist_hold(bpl);
if (err == 0)
*dsizep = bpl->bpl_phys->bpl_bytes;
mutex_exit(&bpl->bpl_lock);
return (err);
}
while ((err = bplist_iterate(bpl, &itor, &bp)) == 0) {
if (bp.blk_birth > mintxg && bp.blk_birth <= maxtxg) {
size += bp_get_dsize(dmu_objset_spa(bpl->bpl_mos), &bp);
}
}
if (err == ENOENT)
err = 0;
*dsizep = size;
return (err);
}
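The iteration idiom in example 5 generalizes: bplist_iterate() advances an opaque cursor (itor) one block pointer at a time and returns ENOENT when the list is exhausted, which the caller maps back to success. A minimal sketch assuming the same legacy bplist API; count_bplist_entries() is a hypothetical name:

static int
count_bplist_entries(bplist_t *bpl, uint64_t *countp)
{
	uint64_t itor = 0;
	blkptr_t bp;
	int err;

	*countp = 0;
	while ((err = bplist_iterate(bpl, &itor, &bp)) == 0)
		(*countp)++;
	return (err == ENOENT ? 0 : err);	/* ENOENT just means end-of-list */
}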
Example 6: zil_alloc
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zilog->zl_itx_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
return (zilog);
}
Example 7: blksz_changed_cb
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfs_sb_t *zsb = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zsb->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zsb->z_max_blksz = newval;
}
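The assertions in example 7 spell out the constraints on a dataset block size: at least SPA_MINBLOCKSIZE, no larger than the pool-wide spa_maxblocksize(), and a power of two (ISP2). Example 12 below performs the same validation at runtime; as a standalone predicate it might look like this hypothetical helper:

static boolean_t
blksz_is_valid(objset_t *os, uint64_t blksz)
{
	return (blksz >= SPA_MINBLOCKSIZE &&
	    blksz <= spa_maxblocksize(dmu_objset_spa(os)) &&
	    ISP2(blksz));
}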
Example 8: bplist_create
uint64_t
bplist_create(objset_t *mos, int blocksize, dmu_tx_t *tx)
{
int size;
size = spa_version(dmu_objset_spa(mos)) < SPA_VERSION_BPLIST_ACCOUNT ?
BPLIST_SIZE_V0 : sizeof (bplist_phys_t);
return (dmu_object_alloc(mos, DMU_OT_BPLIST, blocksize,
DMU_OT_BPLIST_HDR, size, tx));
}
Example 9: zfs_set_version
int
zfs_set_version(const char *name, uint64_t newvers)
{
int error;
objset_t *os;
dmu_tx_t *tx;
uint64_t curvers;
/*
* XXX for now, require that the filesystem be unmounted. Would
* be nice to find the zfsvfs_t and just update that if
* possible.
*/
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (EINVAL);
error = dmu_objset_open(name, DMU_OST_ZFS, DS_MODE_PRIMARY, &os);
if (error)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &curvers);
if (error)
goto out;
if (newvers < curvers) {
error = EINVAL;
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 0, ZPL_VERSION_STR);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1,
&newvers, tx);
spa_history_internal_log(LOG_DS_UPGRADE,
dmu_objset_spa(os), tx, CRED(),
"oldver=%llu newver=%llu dataset = %llu", curvers, newvers,
dmu_objset_id(os));
dmu_tx_commit(tx);
out:
dmu_objset_close(os);
return (error);
}
Example 10: zvol_first_open
static int
zvol_first_open(zvol_state_t *zv)
{
objset_t *os;
uint64_t volsize;
int error;
uint64_t ro;
/* lie and say we're read-only */
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
if (error)
return (SET_ERROR(-error));
zv->zv_objset = os;
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
goto out_owned;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_owned;
error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
if (error)
goto out_owned;
set_capacity(zv->zv_disk, volsize >> 9);
zv->zv_volsize = volsize;
zv->zv_zilog = zil_open(os, zvol_get_data);
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
set_disk_ro(zv->zv_disk, 1);
zv->zv_flags |= ZVOL_RDONLY;
} else {
set_disk_ro(zv->zv_disk, 0);
zv->zv_flags &= ~ZVOL_RDONLY;
}
out_owned:
if (error) {
dmu_objset_disown(os, zvol_tag);
zv->zv_objset = NULL;
}
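	/* ZFS uses positive errnos internally; the block layer expects them negated. */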
return (SET_ERROR(-error));
}
Example 11: bplist_enqueue
int
bplist_enqueue(bplist_t *bpl, const blkptr_t *bp, dmu_tx_t *tx)
{
uint64_t blk, off;
blkptr_t *bparray;
int err;
ASSERT(!BP_IS_HOLE(bp));
mutex_enter(&bpl->bpl_lock);
	err = bplist_hold(bpl);
	if (err) {
		mutex_exit(&bpl->bpl_lock);	/* don't return with the lock held */
		return (err);
	}
blk = bpl->bpl_phys->bpl_entries >> bpl->bpl_bpshift;
off = P2PHASE(bpl->bpl_phys->bpl_entries, 1ULL << bpl->bpl_bpshift);
err = bplist_cache(bpl, blk);
if (err) {
mutex_exit(&bpl->bpl_lock);
return (err);
}
dmu_buf_will_dirty(bpl->bpl_cached_dbuf, tx);
bparray = bpl->bpl_cached_dbuf->db_data;
bparray[off] = *bp;
/* We never need the fill count. */
bparray[off].blk_fill = 0;
/* The bplist will compress better if we can leave off the checksum */
if (!BP_GET_DEDUP(&bparray[off]))
bzero(&bparray[off].blk_cksum, sizeof (bparray[off].blk_cksum));
dmu_buf_will_dirty(bpl->bpl_dbuf, tx);
bpl->bpl_phys->bpl_entries++;
bpl->bpl_phys->bpl_bytes +=
bp_get_dsize_sync(dmu_objset_spa(bpl->bpl_mos), bp);
if (bpl->bpl_havecomp) {
bpl->bpl_phys->bpl_comp += BP_GET_PSIZE(bp);
bpl->bpl_phys->bpl_uncomp += BP_GET_UCSIZE(bp);
}
mutex_exit(&bpl->bpl_lock);
return (0);
}
Example 12: zpios_dmu_object_create
static uint64_t
zpios_dmu_object_create(run_args_t *run_args, objset_t *os)
{
struct dmu_tx *tx;
uint64_t obj = 0ULL;
uint64_t blksize = run_args->block_size;
int rc;
if (blksize < SPA_MINBLOCKSIZE ||
blksize > spa_maxblocksize(dmu_objset_spa(os)) ||
!ISP2(blksize)) {
zpios_print(run_args->file,
"invalid block size for pool: %d\n", (int)blksize);
return (obj);
}
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, OBJ_SIZE);
rc = dmu_tx_assign(tx, TXG_WAIT);
if (rc) {
zpios_print(run_args->file,
"dmu_tx_assign() failed: %d\n", rc);
dmu_tx_abort(tx);
return (obj);
}
obj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, DMU_OT_NONE, 0, tx);
rc = dmu_object_set_blocksize(os, obj, blksize, 0, tx);
if (rc) {
zpios_print(run_args->file,
"dmu_object_set_blocksize to %d failed: %d\n",
(int)blksize, rc);
dmu_tx_abort(tx);
return (obj);
}
dmu_tx_commit(tx);
return (obj);
}
Example 13: zvol_first_open
static int
zvol_first_open(zvol_state_t *zv)
{
objset_t *os;
uint64_t volsize;
int locked = 0;
int error;
uint64_t ro;
/*
* In all other cases the spa_namespace_lock is taken before the
* bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
* function calls fops->open() with the bdev->bd_mutex lock held.
*
* To avoid a potential lock inversion deadlock we preemptively
* try to take the spa_namespace_lock(). Normally it will not
* be contended and this is safe because spa_open_common() handles
* the case where the caller already holds the spa_namespace_lock.
*
* When it is contended we risk a lock inversion if we were to
* block waiting for the lock. Luckily, the __blkdev_get()
* function allows us to return -ERESTARTSYS which will result in
* bdev->bd_mutex being dropped, reacquired, and fops->open() being
* called again. This process can be repeated safely until both
* locks are acquired.
*/
if (!mutex_owned(&spa_namespace_lock)) {
locked = mutex_tryenter(&spa_namespace_lock);
if (!locked)
return (-ERESTARTSYS);
}
/* lie and say we're read-only */
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
if (error)
goto out_mutex;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error) {
dmu_objset_disown(os, zvol_tag);
goto out_mutex;
}
zv->zv_objset = os;
error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
if (error) {
dmu_objset_disown(os, zvol_tag);
goto out_mutex;
}
set_capacity(zv->zv_disk, volsize >> 9);
zv->zv_volsize = volsize;
zv->zv_zilog = zil_open(os, zvol_get_data);
VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
set_disk_ro(zv->zv_disk, 1);
zv->zv_flags |= ZVOL_RDONLY;
} else {
set_disk_ro(zv->zv_disk, 0);
zv->zv_flags &= ~ZVOL_RDONLY;
}
out_mutex:
if (locked)
mutex_exit(&spa_namespace_lock);
return (-error);
}
Example 14: zvol_create_minor_impl
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
* device is live and ready for use.
*/
static int
zvol_create_minor_impl(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t len;
unsigned minor = 0;
int error = 0;
mutex_enter(&zvol_state_lock);
zv = zvol_find_by_name(name);
if (zv) {
error = SET_ERROR(EEXIST);
goto out;
}
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
error = zvol_find_minor(&minor);
if (error)
goto out_dmu_objset_disown;
zv = zvol_alloc(MKDEV(zvol_major, minor), name);
if (zv == NULL) {
error = SET_ERROR(EAGAIN);
goto out_dmu_objset_disown;
}
if (dmu_objset_is_snapshot(os))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
blk_queue_max_hw_sectors(zv->zv_queue, (DMU_MAX_ACCESS / 4) >> 9);
blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
blk_queue_max_discard_sectors(zv->zv_queue,
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#ifdef QUEUE_FLAG_NONROT
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zv->zv_queue);
#endif
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
zil_destroy(dmu_objset_zil(os), B_FALSE);
else
zil_replay(os, zv, zvol_replay_vector);
}
/*
* When udev detects the addition of the device it will immediately
* invoke blkid(8) to determine the type of content on the device.
* Prefetching the blocks commonly scanned by blkid(8) will speed
* up this process.
*/
len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
if (len > 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
ZIO_PRIORITY_SYNC_READ);
}
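	/* The objset was held only for setup; zvol_first_open() re-owns it. */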
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, zvol_tag);
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
out:
//......... the rest of the code is omitted here .........
Example 15: zfs_register_callbacks
int
zfs_register_callbacks(zfs_sb_t *zsb)
{
struct dsl_dataset *ds = NULL;
objset_t *os = zsb->z_os;
zfs_mntopts_t *zmo = zsb->z_mntopts;
int error = 0;
ASSERT(zsb);
ASSERT(zmo);
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os))) {
zmo->z_do_readonly = B_TRUE;
zmo->z_readonly = B_TRUE;
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zsb);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (zmo->z_do_readonly)
readonly_changed_cb(zsb, zmo->z_readonly);
if (zmo->z_do_setuid)
setuid_changed_cb(zsb, zmo->z_setuid);
if (zmo->z_do_exec)
exec_changed_cb(zsb, zmo->z_exec);
if (zmo->z_do_devices)
devices_changed_cb(zsb, zmo->z_devices);
if (zmo->z_do_xattr)
xattr_changed_cb(zsb, zmo->z_xattr);
if (zmo->z_do_atime)
atime_changed_cb(zsb, zmo->z_atime);
if (zmo->z_do_relatime)
relatime_changed_cb(zsb, zmo->z_relatime);
if (zmo->z_do_nbmand)
nbmand_changed_cb(zsb, zmo->z_nbmand);
return (0);
unregister:
/*
* We may attempt to unregister some callbacks that are not
* registered, but this is OK; it will simply return ENOMSG,
* which we will ignore.
*/
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_ATIME),
atime_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RELATIME),
relatime_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
xattr_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
blksz_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
readonly_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_DEVICES),
//......... the rest of the code is omitted here .........