This article collects typical usage examples of the C++ function dmu_tx_is_syncing. If you have been wondering what exactly dmu_tx_is_syncing does, or how to use it, the curated examples below should help.
Fifteen code examples of dmu_tx_is_syncing follow, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
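Before diving into the examples, here is a minimal sketch of the pattern nearly all of them follow. dmu_tx_is_syncing(tx) reports whether a transaction is executing in the pool's syncing context (as opposed to having been assigned in open context), and DSL sync tasks use it to defer authoritative checks, and all on-disk changes, to syncing context. The names mytask_check, mytask_sync, and mytask_arg_t are hypothetical, and the dsl_sync_task() dispatch shown is the recent OpenZFS form; several examples below come from older code bases that used dsl_sync_task_do() with two argument pointers instead.

#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>

typedef struct mytask_arg {
	const char *mta_target;	/* whatever the task operates on */
} mytask_arg_t;

static int
mytask_check(void *arg, dmu_tx_t *tx)
{
	/*
	 * The check callback typically runs twice: once in open context
	 * for cheap early validation, and again in syncing context where
	 * its verdict is authoritative. Racy or expensive checks are
	 * skipped until the syncing-context invocation.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);
	/* ... checks that need a quiesced pool go here ... */
	return (0);
}

static void
mytask_sync(void *arg, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));	/* sync callbacks always sync */
	/* ... perform the actual on-disk modification here ... */
}

/*
 * Dispatch, hedged (modern OpenZFS signature):
 * dsl_sync_task("tank", mytask_check, mytask_sync, &arg, 0,
 *     ZFS_SPACE_CHECK_NORMAL);
 */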
Example 1: dmu_objset_create_sync
static void
dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
dsl_dir_t *dd = arg1;
struct oscarg *oa = arg2;
dsl_dataset_t *ds;
blkptr_t *bp;
uint64_t dsobj;
ASSERT(dmu_tx_is_syncing(tx));
dsobj = dsl_dataset_create_sync(dd, oa->lastname,
oa->clone_parent, oa->flags, cr, tx);
VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj, FTAG, &ds));
bp = dsl_dataset_get_blkptr(ds);
if (BP_IS_HOLE(bp)) {
objset_impl_t *osi;
/* This is an empty dmu_objset; not a clone. */
osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
ds, bp, oa->type, tx);
if (oa->userfunc)
oa->userfunc(&osi->os, oa->userarg, cr, tx);
}
spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
tx, cr, "dataset = %llu", dsobj);
dsl_dataset_rele(ds, FTAG);
}
Example 2: dsl_dir_destroy_check
/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
dsl_dir_t *dd = arg1;
dsl_pool_t *dp = dd->dd_pool;
objset_t *mos = dp->dp_meta_objset;
int err;
uint64_t count;
/*
* There should be exactly two holds, both from
* dsl_dataset_destroy: one on the dd directory, and one on its
* head ds. If there are more holds, then a concurrent thread is
* performing a lookup inside this dir while we're trying to destroy
* it. To minimize this possibility, we perform this check only
* in syncing context and fail the operation if we encounter
* additional holds. The dp_config_rwlock ensures that nobody else
* opens it after we check.
*/
if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 2)
return (EBUSY);
err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
if (err)
return (err);
if (count != 0)
return (EEXIST);
return (0);
}
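A usage note on the example above: in the dsl_sync_task framework this check belongs to, the same callback is invoked first in open context and then again in syncing context, which is why the EBUSY test is gated on dmu_tx_is_syncing(); only once the pool has quiesced is the buffer's hold count stable enough to trust. A hedged sketch of how such a check/sync pair was dispatched in the older code base this example comes from (the exact dsl_sync_task_do() parameters are from memory and should be treated as illustrative):

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_destroy_check, dsl_dir_destroy_sync, dd, tag, 0);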
Example 3: log_internal
/*
* The nvlist will be consumed by this call.
*/
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
dmu_tx_t *tx, const char *fmt, va_list adx)
{
char *msg;
/*
* If this is part of creating a pool, not everything is
* initialized yet, so don't bother logging the internal events.
* Likewise if the pool is not writeable.
*/
if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
fnvlist_free(nvl);
return;
}
msg = kmem_vasprintf(fmt, adx);
fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
strfree(msg);
fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
if (dmu_tx_is_syncing(tx)) {
spa_history_log_sync(nvl, tx);
} else {
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_history_log_sync, nvl, 0, ZFS_SPACE_CHECK_NONE, tx);
}
/* spa_history_log_sync() will free nvl */
}
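The example above is the modern, nvlist-based variant of the internal history logger; Examples 6 and 7 below show older incarnations of the same dispatch idiom: log synchronously when already in syncing context, otherwise register a fire-and-forget sync task to run when the txg syncs. A hedged sketch of a typical caller, via the public wrapper that (to my understanding) feeds log_internal() in OpenZFS; the operation name and format string are illustrative:

	/* callable from open or syncing context; the logger picks the path */
	spa_history_log_internal(spa, "set", tx, "%s=%lld dataset = %llu",
	    "refquota", (longlong_t)quota, (u_longlong_t)dsobj);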
Example 4: vdev_indirect_births_add_entry
void
vdev_indirect_births_add_entry(vdev_indirect_births_t *vib,
uint64_t max_offset, uint64_t txg, dmu_tx_t *tx)
{
vdev_indirect_birth_entry_phys_t vibe;
uint64_t old_size;
uint64_t new_size;
vdev_indirect_birth_entry_phys_t *new_entries;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_pool_sync_context(dmu_tx_pool(tx)));
ASSERT(vdev_indirect_births_verify(vib));
dmu_buf_will_dirty(vib->vib_dbuf, tx);
vibe.vibe_offset = max_offset;
vibe.vibe_phys_birth_txg = txg;
old_size = vdev_indirect_births_size_impl(vib);
dmu_write(vib->vib_objset, vib->vib_object, old_size, sizeof (vibe),
&vibe, tx);
vib->vib_phys->vib_count++;
new_size = vdev_indirect_births_size_impl(vib);
new_entries = kmem_alloc(new_size, KM_SLEEP);
if (old_size > 0) {
bcopy(vib->vib_entries, new_entries, old_size);
kmem_free(vib->vib_entries, old_size);
}
new_entries[vib->vib_phys->vib_count - 1] = vibe;
vib->vib_entries = new_entries;
}
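A design note on the example above: the on-disk births object is a flat array of vdev_indirect_birth_entry_phys_t records, so an append is a single dmu_write() at the current end of the object, while the in-core mirror (vib_entries) is grown by allocate-copy-free. Presumably the size helper is just the record count times the record size, along the lines of this hedged sketch:

	static uint64_t
	vdev_indirect_births_size_impl(vdev_indirect_births_t *vib)
	{
		return (vib->vib_phys->vib_count *
		    sizeof (vdev_indirect_birth_entry_phys_t));
	}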
Example 5: dmu_objset_create_sync
static void
dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
dsl_dir_t *dd = arg1;
spa_t *spa = dd->dd_pool->dp_spa;
struct oscarg *oa = arg2;
uint64_t obj;
ASSERT(dmu_tx_is_syncing(tx));
obj = dsl_dataset_create_sync(dd, oa->lastname,
oa->clone_origin, oa->crypto_ctx, oa->flags, oa->cr, tx);
if (oa->clone_origin == NULL) {
dsl_pool_t *dp = dd->dd_pool;
dsl_dataset_t *ds;
blkptr_t *bp;
objset_t *os;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
bp = dsl_dataset_get_blkptr(ds);
ASSERT(BP_IS_HOLE(bp));
os = dmu_objset_create_impl(spa, ds, bp, oa->type,
oa->crypto_ctx, tx);
if (oa->userfunc)
oa->userfunc(os, oa->userarg, oa->cr, tx);
dsl_dataset_rele(ds, FTAG);
}
}
Example 6: spa_history_internal_log
void
spa_history_internal_log(history_internal_events_t event, spa_t *spa,
dmu_tx_t *tx, cred_t *cr, const char *fmt, ...)
{
history_arg_t *hap;
char *str;
va_list adx;
hap = kmem_alloc(sizeof (history_arg_t), KM_SLEEP);
str = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
va_start(adx, fmt);
(void) vsnprintf(str, HIS_MAX_RECORD_LEN, fmt, adx);
va_end(adx);
hap->ha_log_type = LOG_INTERNAL;
hap->ha_history_str = str;
hap->ha_event = event;
hap->ha_zone[0] = '\0';
if (dmu_tx_is_syncing(tx)) {
spa_history_log_sync(spa, hap, cr, tx);
} else {
dsl_sync_task_do_nowait(spa_get_dsl(spa), NULL,
spa_history_log_sync, spa, hap, 0, tx);
}
/* spa_history_log_sync() will free hap and str */
}
Example 7: log_internal
static void
log_internal(history_internal_events_t event, spa_t *spa,
dmu_tx_t *tx, const char *fmt, va_list adx)
{
history_arg_t *ha;
va_list adx_copy;
/*
* If this is part of creating a pool, not everything is
* initialized yet, so don't bother logging the internal events.
*/
if (tx->tx_txg == TXG_INITIAL)
return;
ha = kmem_alloc(sizeof (history_arg_t), KM_SLEEP);
va_copy(adx_copy, adx);
ha->ha_history_str = kmem_vasprintf(fmt, adx_copy);
va_end(adx_copy);
ha->ha_log_type = LOG_INTERNAL;
ha->ha_event = event;
ha->ha_zone = NULL;
ha->ha_uid = 0;
if (dmu_tx_is_syncing(tx)) {
spa_history_log_sync(spa, ha, tx);
} else {
dsl_sync_task_do_nowait(spa_get_dsl(spa), NULL,
spa_history_log_sync, spa, ha, 0, tx);
}
/* spa_history_log_sync() will free ha and strings */
}
Example 8: spa_condense_indirect_complete_sync
/*
* This sync task completes (finishes) a condense, deleting the old
* mapping and replacing it with the new one.
*/
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_meta_objset;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
uint64_t new_count =
vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
/*
* Reset vdev_indirect_mapping to refer to the new object.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = sci->sci_new_mapping;
rw_exit(&vd->vdev_indirect_rwlock);
sci->sci_new_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = scip->scip_next_mapping_object;
scip->scip_next_mapping_object = 0;
space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
scip->scip_prev_obsolete_sm_object = 0;
scip->scip_vdev = 0;
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, tx));
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
"new mapping object %llu has %llu entries "
"(was %llu entries)",
vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
new_count, old_count);
vdev_config_dirty(spa->spa_root_vdev);
}
Example 9: vdev_indirect_births_alloc
uint64_t
vdev_indirect_births_alloc(objset_t *os, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
return (dmu_object_alloc(os,
DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
DMU_OTN_UINT64_METADATA, sizeof (vdev_indirect_birth_phys_t),
tx));
}
Example 10: dmu_tx_add_new_object
void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
/*
* If we're syncing, they can manipulate any object anyhow, and
* the hold on the dnode_t can cause problems.
*/
if (!dmu_tx_is_syncing(tx))
(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}
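The example above skips the hold entirely in syncing context, since sync-context code is allowed to manipulate any object and a long-lived dnode hold there could itself cause problems. For contrast, this is the standard open-context transaction lifecycle that such holds belong to (a generic consumer-side sketch; os and object stand in for the caller's objset and object number, and TXG_WAIT reflects the long-standing interface):

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);	/* declare what will be dirtied */
	int err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);	/* undo the holds, drop the tx */
		return (err);
	}
	/* ... dirty the declared buffers, e.g. via dmu_write() ... */
	dmu_tx_commit(tx);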
Example 11: spa_vdev_indirect_mark_obsolete
/*
* Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
* wrapper is provided because the DMU does not know about vdev_t's and
* cannot directly call vdev_indirect_mark_obsolete.
*/
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
ASSERT(dmu_tx_is_syncing(tx));
/* The DMU can only remap indirect vdevs. */
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vdev_indirect_mark_obsolete(vd, offset, size);
}
Example 12: dsl_dataset_user_hold_check
static int
dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_user_hold_arg_t *dduha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS)
return (SET_ERROR(ENOTSUP));
if (!dmu_tx_is_syncing(tx))
return (0);
for (pair = nvlist_next_nvpair(dduha->dduha_holds, NULL);
pair != NULL; pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
dsl_dataset_t *ds;
int error = 0;
char *htag, *name;
/* must be a snapshot */
name = nvpair_name(pair);
if (strchr(name, '@') == NULL)
error = SET_ERROR(EINVAL);
if (error == 0)
error = nvpair_value_string(pair, &htag);
if (error == 0)
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error == 0) {
error = dsl_dataset_user_hold_check_one(ds, htag,
dduha->dduha_minor != 0, tx);
dsl_dataset_rele(ds, FTAG);
}
if (error == 0) {
fnvlist_add_string(dduha->dduha_chkholds, name, htag);
} else {
/*
* We register ENOENT errors so they can be correctly
* reported if needed, such as when all holds fail.
*/
fnvlist_add_int32(dduha->dduha_errlist, name, error);
if (error != ENOENT)
return (error);
}
}
return (0);
}
Example 13: dsl_dataset_user_release_check
static int
dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_user_release_arg_t *ddura;
dsl_holdfunc_t *holdfunc;
dsl_pool_t *dp;
nvpair_t *pair;
if (!dmu_tx_is_syncing(tx))
return (0);
dp = dmu_tx_pool(tx);
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
ddura = arg;
holdfunc = ddura->ddura_holdfunc;
for (pair = nvlist_next_nvpair(ddura->ddura_holds, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
int error;
dsl_dataset_t *ds;
nvlist_t *holds;
const char *snapname = nvpair_name(pair);
error = nvpair_value_nvlist(pair, &holds);
if (error != 0)
error = SET_ERROR(EINVAL);
else
error = holdfunc(dp, snapname, FTAG, &ds);
if (error == 0) {
error = dsl_dataset_user_release_check_one(ddura, ds,
holds, snapname);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddura->ddura_errlist != NULL) {
fnvlist_add_int32(ddura->ddura_errlist,
snapname, error);
}
/*
* Non-existent snapshots are put on the errlist,
* but don't cause an overall failure.
*/
if (error != ENOENT)
return (error);
}
}
return (0);
}
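Examples 12 and 13 share a pattern worth calling out: all real validation is deferred to the syncing-context invocation (the early return when !dmu_tx_is_syncing(tx)), and per-snapshot failures are accumulated into an errlist nvlist, with ENOENT deliberately demoted so that one missing snapshot does not abort the whole batch.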
Example 14: spa_condense_indirect_commit_sync
/*
* This sync task appends entries to the new mapping object.
*/
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
&sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}
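The indexing by (txg & TXG_MASK) above is the standard ZFS idiom for per-txg state: with TXG_SIZE transaction groups in flight at once, a small ring of lists lets open-context producers queue entries for later txgs while the syncing thread drains only the slot belonging to the txg being synced. A generic hedged sketch (my_pertxg_t and my_pertxg_add are hypothetical):

	typedef struct my_pertxg {
		list_t mp_entries[TXG_SIZE];	/* one slot per in-flight txg */
	} my_pertxg_t;

	static void
	my_pertxg_add(my_pertxg_t *mp, void *node, dmu_tx_t *tx)
	{
		uint64_t txg = dmu_tx_get_txg(tx);
		/* producers and the syncing consumer never share a slot */
		list_insert_tail(&mp->mp_entries[txg & TXG_MASK], node);
	}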
Example 15: spa_condense_indirect_start_sync
/*
* Sync task to begin the condensing process.
*/
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
ASSERT0(scip->scip_next_mapping_object);
ASSERT0(scip->scip_prev_obsolete_sm_object);
ASSERT0(scip->scip_vdev);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
ASSERT(obsolete_sm_obj != 0);
scip->scip_vdev = vd->vdev_id;
scip->scip_next_mapping_object =
vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
/*
* We don't need to allocate a new space map object, since
* vdev_indirect_sync_obsolete will allocate one when needed.
*/
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (*scip) / sizeof (uint64_t), scip, tx));
ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
"posm=%llu nm=%llu",
vd->vdev_id, dmu_tx_get_txg(tx),
(u_longlong_t)scip->scip_prev_obsolete_sm_object,
(u_longlong_t)scip->scip_next_mapping_object);
zthr_wakeup(spa->spa_condense_zthr);
}