This article collects typical usage examples of the spa_writeable function in C++, drawn from ZFS/OpenZFS source code. If you are wondering what spa_writeable is for, how it is called, and what real call sites look like, the selected examples below should help.
The following shows 15 code examples of the spa_writeable function, sorted by popularity by default.
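For context, spa_writeable() itself is a short predicate defined in the SPA layer. The following is a paraphrased sketch based on OpenZFS spa_misc.c; the exact field and flag names vary between releases, so treat it as illustrative rather than authoritative:

boolean_t
spa_writeable(spa_t *spa)
{
    /*
     * The pool is writeable when it was opened with write access
     * and its sync thread is running.
     */
    return (!!(spa->spa_mode & FWRITE) && spa->spa_sync_on);
}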
Example 1: spa_history_log_nvl
int
spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
{
int err = 0;
dmu_tx_t *tx;
nvlist_t *nvarg;
if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
return (SET_ERROR(EINVAL));
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err) {
dmu_tx_abort(tx);
return (err);
}
VERIFY0(nvlist_dup(nvl, &nvarg, KM_SLEEP));
if (spa_history_zone() != NULL) {
fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
spa_history_zone());
}
fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));
/* Kick this off asynchronously; errors are ignored. */
dsl_sync_task_nowait(spa_get_dsl(spa), spa_history_log_sync,
nvarg, 0, ZFS_SPACE_CHECK_NONE, tx);
dmu_tx_commit(tx);
/* spa_history_log_sync will free nvarg */
return (err);
}
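Example 1 shows the canonical DMU transaction flow around a history record: create the tx, assign it, abort on failure, and commit once the sync task is queued. A hypothetical caller sketch (the key/value written is illustrative; spa_open()/spa_close(), FTAG, and the fnvlist helpers are standard OpenZFS interfaces):

static int
log_custom_event(const char *pool)
{
    spa_t *spa;
    nvlist_t *nvl;
    int err;

    if ((err = spa_open(pool, &spa, FTAG)) != 0)
        return (err);

    nvl = fnvlist_alloc();
    fnvlist_add_string(nvl, ZPOOL_HIST_CMD, "my-tool: custom event");

    /* Returns EINVAL when the pool is not writeable, as seen above. */
    err = spa_history_log_nvl(spa, nvl);

    fnvlist_free(nvl);      /* spa_history_log_nvl duplicates nvl, so the caller still owns it */
    spa_close(spa, FTAG);
    return (err);
}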
Example 2: log_internal
/*
* The nvlist will be consumed by this call.
*/
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
dmu_tx_t *tx, const char *fmt, va_list adx)
{
char *msg;
/*
* If this is part of creating a pool, not everything is
* initialized yet, so don't bother logging the internal events.
* Likewise if the pool is not writeable.
*/
if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
fnvlist_free(nvl);
return;
}
msg = kmem_vasprintf(fmt, adx);
fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
strfree(msg);
fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
if (dmu_tx_is_syncing(tx)) {
spa_history_log_sync(nvl, tx);
} else {
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_history_log_sync, nvl, 0, ZFS_SPACE_CHECK_NONE, tx);
}
/* spa_history_log_sync() will free nvl */
}
Example 3: dsl_sync_task_do_nowait
void
dsl_sync_task_do_nowait(dsl_pool_t *dp,
dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
void *arg1, void *arg2, int blocks_modified, dmu_tx_t *tx)
{
dsl_sync_task_group_t *dstg;
if (!spa_writeable(dp->dp_spa))
return;
dstg = dsl_sync_task_group_create(dp);
dsl_sync_task_create(dstg, checkfunc, syncfunc,
arg1, arg2, blocks_modified);
dsl_sync_task_group_nowait(dstg, tx);
}
Example 4: zvol_first_open
static int
zvol_first_open(zvol_state_t *zv)
{
objset_t *os;
uint64_t volsize;
int error;
uint64_t ro;
/* lie and say we're read-only */
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
if (error)
return (SET_ERROR(-error));
zv->zv_objset = os;
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
goto out_owned;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_owned;
error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
if (error)
goto out_owned;
set_capacity(zv->zv_disk, volsize >> 9);
zv->zv_volsize = volsize;
zv->zv_zilog = zil_open(os, zvol_get_data);
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
set_disk_ro(zv->zv_disk, 1);
zv->zv_flags |= ZVOL_RDONLY;
} else {
set_disk_ro(zv->zv_disk, 0);
zv->zv_flags &= ~ZVOL_RDONLY;
}
out_owned:
if (error) {
dmu_objset_disown(os, zvol_tag);
zv->zv_objset = NULL;
}
return (SET_ERROR(-error));
}
Example 5: mmp_thread_start
void
mmp_thread_start(spa_t *spa)
{
mmp_thread_t *mmp = &spa->spa_mmp;
if (spa_writeable(spa)) {
mutex_enter(&mmp->mmp_thread_lock);
if (!mmp->mmp_thread) {
dprintf("mmp_thread_start pool %s\n",
spa->spa_name);
mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
spa, 0, &p0, TS_RUN, defclsyspri);
}
mutex_exit(&mmp->mmp_thread_lock);
}
}
Example 6: dsl_sync_task_do
int
dsl_sync_task_do(dsl_pool_t *dp,
dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
void *arg1, void *arg2, int blocks_modified)
{
dsl_sync_task_group_t *dstg;
int err;
ASSERT(spa_writeable(dp->dp_spa));
dstg = dsl_sync_task_group_create(dp);
dsl_sync_task_create(dstg, checkfunc, syncfunc,
arg1, arg2, blocks_modified);
err = dsl_sync_task_group_wait(dstg);
dsl_sync_task_group_destroy(dstg);
return (err);
}
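Note the contrast with Example 3: dsl_sync_task_do_nowait() silently becomes a no-op on a read-only pool, while dsl_sync_task_do() asserts writeability and expects callers to have checked first. A hypothetical wrapper that turns the assertion into an error return might look like this (same signature as shown above):

static int
run_sync_task_checked(dsl_pool_t *dp, dsl_checkfunc_t *checkfunc,
    dsl_syncfunc_t *syncfunc, void *arg1, void *arg2, int blocks_modified)
{
    if (!spa_writeable(dp->dp_spa))
        return (SET_ERROR(EROFS));   /* refuse instead of tripping the ASSERT */

    return (dsl_sync_task_do(dp, checkfunc, syncfunc,
        arg1, arg2, blocks_modified));
}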
Example 7: traverse_zil
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zilog_t *zilog;
/*
* We only want to visit blocks that have been claimed but not yet
* replayed; plus, in read-only mode, blocks that are already stable.
*/
if (claim_txg == 0 && spa_writeable(td->td_spa))
return;
zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
claim_txg);
zil_free(zilog);
}
Example 8: dsl_scan_zil
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zil_scan_arg_t zsa = { dp, zh };
zilog_t *zilog;
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
if (claim_txg == 0 && spa_writeable(dp->dp_spa))
return;
zilog = zil_alloc(dp->dp_meta_objset, zh);
(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
claim_txg);
zil_free(zilog);
}
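Examples 7 and 8 use the same guard, which is easier to read when inverted: visit the ZIL when it has been claimed but not yet replayed, or when the pool is read-only (so blocks that would be claimed are still of interest). A hypothetical helper restating that condition:

static boolean_t
want_zil_traversal(spa_t *spa, const zil_header_t *zh)
{
    if (zh->zh_claim_txg != 0)
        return (B_TRUE);             /* claimed, not yet replayed */
    return (!spa_writeable(spa));    /* read-only: would-be-claimed blocks */
}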
Example 9: log_internal
/*
* The nvlist will be consumed by this call.
*/
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
dmu_tx_t *tx, const char *fmt, va_list adx)
{
char *msg;
va_list adx1;
int size;
/*
* If this is part of creating a pool, not everything is
* initialized yet, so don't bother logging the internal events.
* Likewise if the pool is not writeable.
*/
if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
fnvlist_free(nvl);
return;
}
va_copy(adx1, adx);
size = vsnprintf(NULL, 0, fmt, adx1) + 1;
msg = kmem_alloc(size, KM_PUSHPAGE);
va_end(adx1);
va_copy(adx1, adx);
(void) vsprintf(msg, fmt, adx1);
va_end(adx1);
fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
kmem_free(msg, size);
fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
if (dmu_tx_is_syncing(tx)) {
spa_history_log_sync(nvl, tx);
} else {
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_history_log_sync, nvl, 0, tx);
}
/* spa_history_log_sync() will free nvl */
}
Example 10: zfs_sb_setup
int
zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
{
int error;
error = zfs_register_callbacks(zsb);
if (error)
return (error);
/*
* Set the objset user_ptr to track its zsb.
*/
mutex_enter(&zsb->z_os->os_user_ptr_lock);
dmu_objset_set_user(zsb->z_os, zsb);
mutex_exit(&zsb->z_os->os_user_ptr_lock);
zsb->z_log = zil_open(zsb->z_os, zfs_get_data);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
boolean_t readonly;
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
readonly = zfs_is_readonly(zsb);
if (readonly != 0)
readonly_changed_cb(zsb, B_FALSE);
else
zfs_unlinked_drain(zsb);
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zsb->z_os))) {
if (zil_replay_disable) {
zil_destroy(zsb->z_log, B_FALSE);
} else {
zsb->z_replay = B_TRUE;
zil_replay(zsb->z_os, zsb,
zfs_replay_vector);
zsb->z_replay = B_FALSE;
}
}
/* restore readonly bit */
if (readonly != 0)
readonly_changed_cb(zsb, B_TRUE);
}
return (0);
}
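The mount-time ZIL handling in Example 10 boils down to a three-way decision: on a writeable pool, either replay the log or destroy it if replay is disabled; on a read-only pool the log is left untouched. A hypothetical helper isolating just that decision (names taken from the example above):

static void
zfs_replay_or_discard_zil(zfs_sb_t *zsb)
{
    objset_t *os = zsb->z_os;

    if (!spa_writeable(dmu_objset_spa(os)))
        return;    /* read-only pool: leave the intent log alone */

    if (zil_replay_disable) {
        zil_destroy(zsb->z_log, B_FALSE);    /* discard queued records */
    } else {
        zsb->z_replay = B_TRUE;
        zil_replay(os, zsb, zfs_replay_vector);
        zsb->z_replay = B_FALSE;
    }
}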
Example 11: zfs_register_callbacks
int
zfs_register_callbacks(zfs_sb_t *zsb)
{
struct dsl_dataset *ds = NULL;
objset_t *os = zsb->z_os;
zfs_mntopts_t *zmo = zsb->z_mntopts;
int error = 0;
ASSERT(zsb);
ASSERT(zmo);
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os))) {
zmo->z_do_readonly = B_TRUE;
zmo->z_readonly = B_TRUE;
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zsb);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zsb);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (zmo->z_do_readonly)
readonly_changed_cb(zsb, zmo->z_readonly);
if (zmo->z_do_setuid)
setuid_changed_cb(zsb, zmo->z_setuid);
if (zmo->z_do_exec)
exec_changed_cb(zsb, zmo->z_exec);
if (zmo->z_do_devices)
devices_changed_cb(zsb, zmo->z_devices);
if (zmo->z_do_xattr)
xattr_changed_cb(zsb, zmo->z_xattr);
if (zmo->z_do_atime)
atime_changed_cb(zsb, zmo->z_atime);
if (zmo->z_do_relatime)
relatime_changed_cb(zsb, zmo->z_relatime);
if (zmo->z_do_nbmand)
nbmand_changed_cb(zsb, zmo->z_nbmand);
return (0);
unregister:
/*
* We may attempt to unregister some callbacks that are not
* registered, but this is OK; it will simply return ENOMSG,
* which we will ignore.
*/
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_ATIME),
atime_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RELATIME),
relatime_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_XATTR),
xattr_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
blksz_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_READONLY),
readonly_changed_cb, zsb);
(void) dsl_prop_unregister(ds, zfs_prop_to_name(ZFS_PROP_DEVICES),
//......... part of the code omitted here .........
Example 12: __zvol_create_minor
static int
__zvol_create_minor(const char *name, boolean_t ignore_snapdev)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
unsigned minor = 0;
int error = 0;
ASSERT(MUTEX_HELD(&zvol_state_lock));
zv = zvol_find_by_name(name);
if (zv) {
error = EEXIST;
goto out;
}
if (ignore_snapdev == B_FALSE) {
error = __zvol_snapdev_hidden(name);
if (error)
goto out;
}
doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
error = zvol_find_minor(&minor);
if (error)
goto out_dmu_objset_disown;
zv = zvol_alloc(MKDEV(zvol_major, minor), name);
if (zv == NULL) {
error = EAGAIN;
goto out_dmu_objset_disown;
}
if (dmu_objset_is_snapshot(os))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
#ifdef HAVE_BLK_QUEUE_DISCARD
blk_queue_max_discard_sectors(zv->zv_queue,
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#endif
#ifdef HAVE_BLK_QUEUE_NONROT
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
zil_destroy(dmu_objset_zil(os), B_FALSE);
else
zil_replay(os, zv, zvol_replay_vector);
}
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, zvol_tag);
out_doi:
kmem_free(doi, sizeof(dmu_object_info_t));
out:
if (error == 0) {
zvol_insert(zv);
add_disk(zv->zv_disk);
}
return (error);
}
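Example 12 also illustrates the goto-unwind idiom used throughout this code base: each acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far. A stripped-down, purely illustrative skeleton of that shape (acquire_*/release_* are hypothetical helpers):

static int
acquire_three(void)
{
    int error;

    if ((error = acquire_a()) != 0)
        goto out;
    if ((error = acquire_b()) != 0)
        goto out_a;
    if ((error = acquire_c()) != 0)
        goto out_b;

    return (0);

out_b:
    release_b();
out_a:
    release_a();
out:
    return (error);
}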
Example 13: vdev_disk_open
//......... part of the code omitted here .........
#ifdef illumos
if (vd->vdev_physpath != NULL &&
(dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != NODEV)
error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode(spa),
kcred, &dvd->vd_lh, zfs_li);
#endif
/*
* Note that we don't support the legacy auto-wholedisk support
* as above. This hasn't been used in a very long time and we
* don't need to propagate its oddities to this edge condition.
*/
if (error && vd->vdev_path != NULL)
error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
kcred, &dvd->vd_lh, zfs_li);
}
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
vdev_dbgmsg(vd, "vdev_disk_open: failed to open [error=%d]",
error);
return (error);
}
/*
* XXX Apple - We must not set or modify the devid. Import on Solaris/illumos
* expects a valid devid and fails if it cannot be decoded.
*/
#ifdef illumos
/*
* Now that the device has been successfully opened, update the devid
* if necessary.
*/
if (validate_devid && spa_writeable(spa) &&
ldi_get_devid(dvd->vd_lh, &devid) == 0) {
if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
char *vd_devid;
vd_devid = ddi_devid_str_encode(devid, dvd->vd_minor);
vdev_dbgmsg(vd, "vdev_disk_open: update devid from "
"'%s' to '%s'", vd->vdev_devid, vd_devid);
spa_strfree(vd->vdev_devid);
vd->vdev_devid = spa_strdup(vd_devid);
ddi_devid_str_free(vd_devid);
}
ddi_devid_free(devid);
}
#endif
/* XXX Apple to do, needs IORegistry physpath interface */
#ifdef illumos
/*
* Once a device is opened, verify that the physical device path (if
* available) is up to date.
*/
if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
char *physpath, *minorname;
physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
minorname = NULL;
if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
(vd->vdev_physpath == NULL ||
strcmp(vd->vdev_physpath, physpath) != 0)) {
if (vd->vdev_physpath)
//......... part of the code omitted here .........
Example 14: spa_config_sync
/*
* Synchronize pool configuration to disk. This must be called with the
* namespace lock held. Synchronizing the pool cache is typically done after
* the configuration has been synced to the MOS. This exposes a window where
* the MOS config will have been updated but the cache file has not. If
* the system were to crash at that instant then the cached config may not
* contain the correct information to open the pool, and an explicit import
* would be required.
*/
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
spa_config_dirent_t *dp, *tdp;
nvlist_t *nvl;
char *pool_name;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (rootdir == NULL || !(spa_mode_global & FWRITE))
return;
/*
* Iterate over all cachefiles for the pool, past or present. When the
* cachefile is changed, the new one is pushed onto this list, allowing
* us to update previous cachefiles that no longer contain this pool.
*/
for (dp = list_head(&target->spa_config_list); dp != NULL;
dp = list_next(&target->spa_config_list, dp)) {
spa_t *spa = NULL;
if (dp->scd_path == NULL)
continue;
/*
* Iterate over all pools, adding any matching pools to 'nvl'.
*/
nvl = NULL;
while ((spa = spa_next(spa)) != NULL) {
/*
* Skip over our own pool if we're about to remove
* ourselves from the spa namespace or any pool that
* is readonly. Since we cannot guarantee that a
* readonly pool would successfully import upon reboot,
* we don't allow them to be written to the cache file.
*/
if ((spa == target && removing) ||
!spa_writeable(spa))
continue;
mutex_enter(&spa->spa_props_lock);
tdp = list_head(&spa->spa_config_list);
if (spa->spa_config == NULL ||
tdp->scd_path == NULL ||
strcmp(tdp->scd_path, dp->scd_path) != 0) {
mutex_exit(&spa->spa_props_lock);
continue;
}
if (nvl == NULL)
VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
VERIFY0(nvlist_lookup_string(spa->spa_config,
ZPOOL_CONFIG_POOL_NAME, &pool_name));
} else
pool_name = spa_name(spa);
VERIFY(nvlist_add_nvlist(nvl, pool_name,
spa->spa_config) == 0);
mutex_exit(&spa->spa_props_lock);
}
spa_config_write(dp, nvl);
nvlist_free(nvl);
}
/*
* Remove any config entries older than the current one.
*/
dp = list_head(&target->spa_config_list);
while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
list_remove(&target->spa_config_list, tdp);
if (tdp->scd_path != NULL)
spa_strfree(tdp->scd_path);
kmem_free(tdp, sizeof (spa_config_dirent_t));
}
spa_config_generation++;
if (postsysevent)
spa_event_notify(target, NULL, FM_EREPORT_ZFS_CONFIG_SYNC);
}
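The skip condition inside the pool loop of Example 14 reads naturally as a predicate: a pool is written to the cache file only if it is not about to leave the namespace and is writeable, because a read-only pool cannot be guaranteed to import cleanly on reboot. A hypothetical restatement (Example 15 below tightens this to exclude only active read-only pools):

static boolean_t
spa_config_cacheable(spa_t *spa, spa_t *target, boolean_t removing)
{
    if (spa == target && removing)
        return (B_FALSE);    /* about to be removed from the namespace */
    if (!spa_writeable(spa))
        return (B_FALSE);    /* read-only pools are not cached */
    return (B_TRUE);
}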
Example 15: spa_config_sync
/*
* Synchronize pool configuration to disk. This must be called with the
* namespace lock held. Synchronizing the pool cache is typically done after
* the configuration has been synced to the MOS. This exposes a window where
* the MOS config will have been updated but the cache file has not. If
* the system were to crash at that instant then the cached config may not
* contain the correct information to open the pool, and an explicit import
* would be required.
*/
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
spa_config_dirent_t *dp, *tdp;
nvlist_t *nvl;
boolean_t ccw_failure;
int error;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (rootdir == NULL || !(spa_mode_global & FWRITE))
return;
/*
* Iterate over all cachefiles for the pool, past or present. When the
* cachefile is changed, the new one is pushed onto this list, allowing
* us to update previous cachefiles that no longer contain this pool.
*/
ccw_failure = B_FALSE;
for (dp = list_head(&target->spa_config_list); dp != NULL;
dp = list_next(&target->spa_config_list, dp)) {
spa_t *spa = NULL;
if (dp->scd_path == NULL)
continue;
/*
* Iterate over all pools, adding any matching pools to 'nvl'.
*/
nvl = NULL;
while ((spa = spa_next(spa)) != NULL) {
nvlist_t *nvroot = NULL;
/*
* Skip over our own pool if we're about to remove
* ourselves from the spa namespace or any pool that
* is readonly. Since we cannot guarantee that a
* readonly pool would successfully import upon reboot,
* we don't allow them to be written to the cache file.
*/
if ((spa == target && removing) ||
(spa_state(spa) == POOL_STATE_ACTIVE &&
!spa_writeable(spa)))
continue;
mutex_enter(&spa->spa_props_lock);
tdp = list_head(&spa->spa_config_list);
if (spa->spa_config == NULL ||
tdp->scd_path == NULL ||
strcmp(tdp->scd_path, dp->scd_path) != 0) {
mutex_exit(&spa->spa_props_lock);
continue;
}
if (nvl == NULL)
VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist(nvl, spa->spa_name,
spa->spa_config) == 0);
mutex_exit(&spa->spa_props_lock);
if (nvlist_lookup_nvlist(nvl, spa->spa_name, &nvroot) == 0)
spa_config_clean(nvroot);
}
error = spa_config_write(dp, nvl);
if (error != 0)
ccw_failure = B_TRUE;
nvlist_free(nvl);
}
if (ccw_failure) {
/*
* Keep trying so that configuration data is
* written if/when any temporary filesystem
* resource issues are resolved.
*/
if (target->spa_ccw_fail_time == 0) {
zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
target, NULL, NULL, 0, 0);
}
target->spa_ccw_fail_time = gethrtime();
spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
} else {
/*
* Do not rate limit future attempts to update
* the config cache.
*/
target->spa_ccw_fail_time = 0;
}
/*
//......... part of the code omitted here .........