本文整理汇总了C++中AVL_NEXT函数的典型用法代码示例。如果您正苦于以下问题:C++ AVL_NEXT函数的具体用法?C++ AVL_NEXT怎么用?C++ AVL_NEXT使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了AVL_NEXT函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: invert_frags
/*
 * Transpose the duplicate database: the source tree is keyed by physical
 * fragment (each node holding a sub-tree of claimant_ts), and the target
 * tree we build here is keyed by inode (each node holding a sub-tree of
 * reference_ts sorted by logical fragment).
 */
static void
invert_frags(avl_tree_t *source, avl_tree_t *target)
{
	fragment_t *frag;
	claimant_t *claim;
	inode_dup_t *inode;
	inode_dup_t ikey;
	reference_t *ref;
	reference_t rkey;
	avl_index_t where;

	avl_create(target, by_ino_cmp, sizeof (inode_dup_t),
	    OFFSETOF(inode_dup_t, id_avl));

	for (frag = avl_first(source); frag != NULL;
	    frag = AVL_NEXT(source, frag)) {
		for (claim = avl_first(&frag->fr_claimants); claim != NULL;
		    claim = AVL_NEXT(&frag->fr_claimants, claim)) {
			/*
			 * Look up the record for this inode, creating
			 * it on first sight.
			 */
			ikey.id_ino = claim->cl_inode;
			inode = avl_find(target, (void *)&ikey, &where);
			if (inode == NULL) {
				inode = new_inode_dup(claim->cl_inode);
				avl_insert(target, (void *)inode, where);
			}
			/*
			 * A given lfn exists only once for a given inode,
			 * so in theory no duplicate logical fragment can
			 * show up here; ignore any that do.
			 */
			rkey.ref_lfn = claim->cl_lfn;
			ref = avl_find(&inode->id_fragments,
			    (void *)&rkey, &where);
			if (ref != NULL)
				continue;
			ref = (reference_t *)malloc(sizeof (reference_t));
			if (ref == NULL)
				errexit("Out of memory in "
				    "invert_frags\n");
			ref->ref_lfn = claim->cl_lfn;
			ref->ref_pfn = frag->fr_pfn;
			avl_insert(&inode->id_fragments,
			    (void *)ref, where);
		}
	}
}
示例2: pppt_disable_svc
/*
 * pppt_disable_svc
 *
 * clean up all existing sessions and deregister targets from STMF
 *
 * Runs in two phases: first, while holding the global lock, every target
 * is moved from the global target list to a private delete list and its
 * asynchronous deletion is started; second, with the global lock dropped,
 * each target is waited on until it quiesces and is then destroyed.
 * Finally the global taskqs, lists, and STMF registrations are torn down.
 */
static void
pppt_disable_svc(void)
{
pppt_tgt_t *tgt, *next_tgt;
avl_tree_t delete_target_list;
ASSERT(pppt_global.global_svc_state == PSS_DISABLING);
avl_create(&delete_target_list,
pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
offsetof(pppt_tgt_t, target_global_ln));
PPPT_GLOBAL_LOCK();
/*
 * Phase 1: detach every target from the global list onto the private
 * list and kick off its asynchronous delete. The successor must be
 * captured before avl_remove() invalidates the current node's links.
 */
for (tgt = avl_first(&pppt_global.global_target_list);
tgt != NULL;
tgt = next_tgt) {
next_tgt = AVL_NEXT(&pppt_global.global_target_list, tgt);
avl_remove(&pppt_global.global_target_list, tgt);
avl_add(&delete_target_list, tgt);
pppt_tgt_async_delete(tgt);
}
PPPT_GLOBAL_UNLOCK();
/*
 * Phase 2: wait for each target's references to drain and its state
 * to reach TS_DELETING, then destroy it. Done without the global
 * lock so other threads are not blocked while we wait.
 */
for (tgt = avl_first(&delete_target_list);
tgt != NULL;
tgt = next_tgt) {
next_tgt = AVL_NEXT(&delete_target_list, tgt);
mutex_enter(&tgt->target_mutex);
while ((tgt->target_refcount > 0) ||
(tgt->target_state != TS_DELETING)) {
cv_wait(&tgt->target_cv, &tgt->target_mutex);
}
mutex_exit(&tgt->target_mutex);
avl_remove(&delete_target_list, tgt);
pppt_tgt_destroy(tgt);
}
/* All targets are gone; tear down global service state. */
taskq_destroy(pppt_global.global_sess_taskq);
taskq_destroy(pppt_global.global_dispatch_taskq);
avl_destroy(&pppt_global.global_sess_list);
avl_destroy(&pppt_global.global_target_list);
(void) stmf_deregister_port_provider(pppt_global.global_pp);
stmf_free(pppt_global.global_dbuf_store);
pppt_global.global_dbuf_store = NULL;
stmf_free(pppt_global.global_pp);
pppt_global.global_pp = NULL;
}
示例3: vdev_initialize_ranges
/*
 * Walk every range segment queued for initialization on this vdev and
 * issue the corresponding writes, splitting each segment into
 * zfs_initialize_chunk_size pieces. Returns 0 on success or the first
 * write error encountered.
 */
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	avl_tree_t *tree = &vd->vdev_initialize_tree->rt_root;
	range_seg_t *seg = avl_first(tree);

	while (seg != NULL) {
		uint64_t len = seg->rs_end - seg->rs_start;
		/* Number of chunk-sized writes needed to cover the segment. */
		uint64_t nwrites = ((len - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t i = 0; i < nwrites; i++) {
			uint64_t off = i * zfs_initialize_chunk_size;
			int err;

			err = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + seg->rs_start + off,
			    MIN(len - off, zfs_initialize_chunk_size),
			    data);
			if (err != 0)
				return (err);
		}
		seg = AVL_NEXT(tree, seg);
	}
	return (0);
}
示例4: sa_find_layout
/*
 * Locate the layout table entry matching the given attribute list under
 * its hash bucket, creating a new entry (and persisting it) if none of
 * the same-hash entries match. The result is returned through *lot.
 */
static void
sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
    int count, dmu_tx_t *tx, sa_lot_t **lot)
{
	sa_os_t *sa = os->os_sa;
	sa_lot_t *entry, search;
	avl_index_t loc;
	boolean_t match = B_FALSE;

	mutex_enter(&sa->sa_lock);
	search.lot_hash = hash;
	search.lot_instance = 0;
	/* Scan all entries sharing this hash for an exact layout match. */
	entry = avl_find(&sa->sa_layout_hash_tree, &search, &loc);
	while (entry != NULL && entry->lot_hash == hash) {
		if (sa_layout_equal(entry, attrs, count) == 0) {
			match = B_TRUE;
			break;
		}
		entry = AVL_NEXT(&sa->sa_layout_hash_tree, entry);
	}
	if (!match) {
		entry = sa_add_layout_entry(os, attrs, count,
		    avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
	}
	mutex_exit(&sa->sa_lock);
	*lot = entry;
}
示例5: smb_cache_iterate
/*
 * Iterate the cache using the given cursor.
 *
 * Data is copied to the given buffer ('data') using the copy function
 * specified at cache creation time.
 *
 * If the cache is modified while an iteration is in progress it causes
 * the iteration to finish prematurely. This is to avoid the need to lock
 * the whole cache while it is being iterated.
 */
boolean_t
smb_cache_iterate(smb_cache_t *chandle, smb_cache_cursor_t *cursor, void *data)
{
	smb_cache_node_t *cnode = NULL;

	assert(data);

	if (smb_cache_rdlock(chandle) != 0)
		return (B_FALSE);

	if (cursor->cc_sequence == chandle->ch_sequence) {
		/* Advance: start at the first node or step past the last. */
		if (cursor->cc_next == NULL)
			cnode = avl_first(&chandle->ch_cache);
		else
			cnode = AVL_NEXT(&chandle->ch_cache,
			    cursor->cc_next);

		if (cnode != NULL)
			chandle->ch_copy(cnode->cn_data, data,
			    chandle->ch_datasz);
		cursor->cc_next = cnode;
		smb_cache_unlock(chandle);
		return (cnode != NULL);
	}

	/* Cache changed under the cursor; end the iteration. */
	smb_cache_unlock(chandle);
	return (B_FALSE);
}
示例6: mze_find_unused_cd
/*
 * Return the smallest collision differentiator (cd) not already used by
 * an entry with the given hash in the microzap AVL tree. Relies on the
 * tree ordering same-hash entries by ascending cd.
 */
static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
	mzap_ent_t search;
	mzap_ent_t *ent;
	avl_index_t idx;
	avl_tree_t *tree = &zap->zap_m.zap_avl;
	uint32_t cd = 0;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	search.mze_hash = hash;
	search.mze_phys.mze_cd = 0;

	/* Walk same-hash entries while their cds form a dense run from 0. */
	ent = avl_find(tree, &search, &idx);
	while (ent != NULL && ent->mze_hash == hash &&
	    ent->mze_phys.mze_cd == cd) {
		cd++;
		ent = AVL_NEXT(tree, ent);
	}
	return (cd);
}
示例7: mze_find
/*
 * Look up a microzap entry by name. The hash (precomputed by the caller)
 * narrows the search to one bucket of the AVL tree; the name is compared
 * against each same-hash entry. Returns the entry or NULL.
 */
static mzap_ent_t *
mze_find(zap_t *zap, const char *name, uint64_t hash)
{
	mzap_ent_t search;
	mzap_ent_t *ent;
	avl_index_t idx;
	avl_tree_t *tree = &zap->zap_m.zap_avl;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT3U(zap_hash(zap, name), ==, hash);

	/* Names too long for a microzap entry can never match. */
	if (strlen(name) >= sizeof (search.mze_phys.mze_name))
		return (NULL);

	search.mze_hash = hash;
	search.mze_phys.mze_cd = 0;
	ent = avl_find(tree, &search, &idx);
	if (ent == NULL)
		ent = avl_nearest(tree, idx, AVL_AFTER);

	while (ent != NULL && ent->mze_hash == hash) {
		if (strcmp(name, ent->mze_phys.mze_name) == 0)
			return (ent);
		ent = AVL_NEXT(tree, ent);
	}
	return (NULL);
}
示例8: zfsctl_unmount_snapshots
/*
 * Traverse all mounted snapshots and attempt to unmount them. This
 * is best effort, on failure EEXIST is returned and count will be set
 * to the number of file snapshots which could not be unmounted.
 */
int
zfsctl_unmount_snapshots(zfs_sb_t *zsb, int flags, int *count)
{
zfs_snapentry_t *sep, *next;
int error = 0;
*count = 0;
ASSERT(zsb->z_ctldir != NULL);
mutex_enter(&zsb->z_ctldir_lock);
sep = avl_first(&zsb->z_ctldir_snaps);
while (sep != NULL) {
/* Capture the successor before removing the current entry. */
next = AVL_NEXT(&zsb->z_ctldir_snaps, sep);
avl_remove(&zsb->z_ctldir_snaps, sep);
/* Drop the lock while unmounting; the entry is already off the tree. */
mutex_exit(&zsb->z_ctldir_lock);
error = __zfsctl_unmount_snapshot(sep, flags);
mutex_enter(&zsb->z_ctldir_lock);
/* EBUSY: still mounted -- re-add the entry and count the failure. */
if (error == EBUSY) {
avl_add(&zsb->z_ctldir_snaps, sep);
(*count)++;
} else {
zfsctl_sep_free(sep);
}
sep = next;
}
mutex_exit(&zsb->z_ctldir_lock);
return ((*count > 0) ? EEXIST : 0);
}
示例9: report_dups
/*
 * Dump the duplicates table in a relatively user-friendly form.
 * The idea is that the output can be useful when trying to manually
 * work out which block belongs to which of the claiming inodes.
 *
 * What we have is a tree of duplicates indexed by physical
 * fragment number. What we want to report is:
 *
 * Inode %d:
 * Logical Offset 0x%08llx, Physical Fragment %d
 * Logical Offsets 0x%08llx - 0x%08llx, Physical Fragments %d - %d
 * ...
 * Inode %d:
 * Logical Offsets 0x%08llx - 0x%08llx, Physical Fragments %d - %d
 * ...
 */
int
report_dups(int quiet)
{
	int overlaps = 0;
	inode_dup_t *inode;
	fragment_t *frag;
	avl_tree_t inode_frags;

	ASSERT(have_dups());
	/*
	 * Determine whether any fragment still has multiple claimants;
	 * this decides whether the filesystem can be marked clean.
	 * One such fragment is enough, so stop at the first.
	 */
	for (frag = avl_first(&dup_frags); frag != NULL;
	    frag = AVL_NEXT(&dup_frags, frag)) {
		if (avl_numnodes(&frag->fr_claimants) > 1) {
			overlaps++;
			break;
		}
	}
	/*
	 * Report on every object that still exists that had *any*
	 * dups associated with it.
	 */
	if (!quiet) {
		(void) puts("\nSome blocks that were found to be in "
		    "multiple files are still\nassigned to "
		    "file(s).\nFragments sorted by inode and "
		    "logical offsets:");
		invert_frags(&dup_frags, &inode_frags);
		for (inode = avl_first(&inode_frags); inode != NULL;
		    inode = AVL_NEXT(&inode_frags, inode))
			report_inode_dups(inode);
		(void) printf("\n");
		free_invert_frags(&inode_frags);
	}
	return (overlaps);
}
示例10: range_tree_walk
/*
 * Invoke func(arg, start, size) for every segment in the range tree,
 * in tree order.
 */
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	range_seg_t *seg = avl_first(&rt->rt_root);

	while (seg != NULL) {
		func(arg, seg->rs_start, seg->rs_end - seg->rs_start);
		seg = AVL_NEXT(&rt->rt_root, seg);
	}
}
示例11: sa_add_layout_entry
/*
 * Allocate and register a new attribute-layout table entry. The entry
 * is added to both the layout-number tree and the layout-hash tree; if
 * 'zapadd' is set it is also persisted in the on-disk layout ZAP
 * (created here on first use). Returns the new entry. Caller must
 * hold sa_lock.
 */
static sa_lot_t *
sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
{
sa_os_t *sa = os->os_sa;
sa_lot_t *tb, *findtb;
int i;
avl_index_t loc;
ASSERT(MUTEX_HELD(&sa->sa_lock));
tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
tb->lot_attr_count = attr_count;
tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
KM_SLEEP);
bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
tb->lot_num = lot_num;
tb->lot_hash = hash;
tb->lot_instance = 0;
if (zapadd) {
char attr_name[8];
/* Create the on-disk layout ZAP the first time it is needed. */
if (sa->sa_layout_attr_obj == 0) {
sa->sa_layout_attr_obj = zap_create(os,
DMU_OT_SA_ATTR_LAYOUTS, DMU_OT_NONE, 0, tx);
VERIFY(zap_add(os, sa->sa_master_obj, SA_LAYOUTS, 8, 1,
&sa->sa_layout_attr_obj, tx) == 0);
}
/* The layout number (as decimal text) names the ZAP entry. */
(void) snprintf(attr_name, sizeof (attr_name),
"%d", (int)lot_num);
VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
attr_name, 2, attr_count, attrs, tx));
}
list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
offsetof(sa_idx_tab_t, sa_next));
/* Count the variable-sized attributes in this layout. */
for (i = 0; i != attr_count; i++) {
if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
tb->lot_var_sizes++;
}
avl_add(&sa->sa_layout_num_tree, tb);
/* verify we don't have a hash collision */
if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
/* Bump lot_instance past the existing same-hash entries. */
for (; findtb && findtb->lot_hash == hash;
findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
if (findtb->lot_instance != tb->lot_instance)
break;
tb->lot_instance++;
}
}
avl_add(&sa->sa_layout_hash_tree, tb);
return (tb);
}
示例12: nlm_do_free_all
/*
 * NLM_FREE_ALL, NLM4_FREE_ALL
 *
 * Destroy all lock state for the calling client.
 *
 * Collects every host whose name matches argp->name onto a local list
 * (taking a reference on each), then notifies each of those hosts of
 * the client's new state so its server-side locks are released.
 */
void
nlm_do_free_all(nlm4_notify *argp, void *res, struct svc_req *sr)
{
struct nlm_globals *g;
struct nlm_host_list host_list;
struct nlm_host *hostp;
TAILQ_INIT(&host_list);
g = zone_getspecific(nlm_zone_key, curzone);
/* Serialize calls to clean locks. */
mutex_enter(&g->clean_lock);
/*
 * Find all hosts that have the given node name and put them on a
 * local list.
 */
mutex_enter(&g->lock);
for (hostp = avl_first(&g->nlm_hosts_tree); hostp != NULL;
hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp)) {
if (strcasecmp(hostp->nh_name, argp->name) == 0) {
/*
 * If needed take the host out of the idle list since
 * we are taking a reference.
 */
if (hostp->nh_flags & NLM_NH_INIDLE) {
TAILQ_REMOVE(&g->nlm_idle_hosts, hostp,
nh_link);
hostp->nh_flags &= ~NLM_NH_INIDLE;
}
hostp->nh_refs++;
TAILQ_INSERT_TAIL(&host_list, hostp, nh_link);
}
}
mutex_exit(&g->lock);
/* Free locks for all hosts on the local list. */
while (!TAILQ_EMPTY(&host_list)) {
hostp = TAILQ_FIRST(&host_list);
TAILQ_REMOVE(&host_list, hostp, nh_link);
/*
 * Note that this does not do client-side cleanup.
 * We want to do that ONLY if statd tells us the
 * server has restarted.
 */
nlm_host_notify_server(hostp, argp->state);
/* Drop the reference taken above. */
nlm_host_release(g, hostp);
}
mutex_exit(&g->clean_lock);
/* Reply and request are unused by this procedure. */
(void) res;
(void) sr;
}
示例13: zfsctl_lookup_objset
/*
 * Find the mounted snapshot whose objset id matches 'objsetid' and
 * return its zfs_sb_t through *zsbp. Returns 0 on success, EINVAL if
 * no mounted snapshot matches, or an error from dmu_snapshot_lookup()
 * / zpl_sget(). Caller's zsb->z_ctldir must be initialized.
 */
int
zfsctl_lookup_objset(struct super_block *sb, uint64_t objsetid, zfs_sb_t **zsbp)
{
zfs_sb_t *zsb = sb->s_fs_info;
struct super_block *sbp;
zfs_snapentry_t *sep;
uint64_t id;
int error;
ASSERT(zsb->z_ctldir != NULL);
mutex_enter(&zsb->z_ctldir_lock);
/*
 * Verify that the snapshot is mounted.
 */
sep = avl_first(&zsb->z_ctldir_snaps);
while (sep != NULL) {
error = dmu_snapshot_lookup(zsb->z_os, sep->se_name, &id);
if (error)
goto out;
if (id == objsetid)
break;
sep = AVL_NEXT(&zsb->z_ctldir_snaps, sep);
}
if (sep != NULL) {
/*
 * Lookup the mounted root rather than the covered mount
 * point. This may fail if the snapshot has just been
 * unmounted by an unrelated user space process. This
 * race cannot occur to an expired mount point because
 * we hold the zsb->z_ctldir_lock to prevent the race.
 */
sbp = zpl_sget(&zpl_fs_type, zfsctl_test_super,
zfsctl_set_super, 0, &id);
if (IS_ERR(sbp)) {
/* Convert the negative Linux errno to a positive error. */
error = -PTR_ERR(sbp);
} else {
*zsbp = sbp->s_fs_info;
/* Drop the superblock reference taken by zpl_sget(). */
deactivate_super(sbp);
}
} else {
error = EINVAL;
}
out:
mutex_exit(&zsb->z_ctldir_lock);
ASSERT3S(error, >=, 0);
return (error);
}
示例14: pppt_task_lookup
/*
 * Search every session of every target for the task with the given
 * message id. On a match, a hold is taken on the task before it is
 * returned (NULL is returned instead if the hold fails). Returns NULL
 * when no matching task exists.
 */
pppt_task_t *
pppt_task_lookup(stmf_ic_msgid_t msgid)
{
pppt_tgt_t *tgt;
pppt_sess_t *sess;
pppt_task_t lookup_task;
pppt_task_t *result;
bzero(&lookup_task, sizeof (lookup_task));
lookup_task.pt_task_id = msgid;
PPPT_GLOBAL_LOCK();
for (tgt = avl_first(&pppt_global.global_target_list); tgt != NULL;
tgt = AVL_NEXT(&pppt_global.global_target_list, tgt)) {
mutex_enter(&tgt->target_mutex);
for (sess = avl_first(&tgt->target_sess_list); sess != NULL;
sess = AVL_NEXT(&tgt->target_sess_list, sess)) {
mutex_enter(&sess->ps_mutex);
if ((result = avl_find(&sess->ps_task_list,
&lookup_task, NULL)) != NULL) {
/* Hold the task while still under ps_mutex. */
if (pppt_task_hold(result) !=
PPPT_STATUS_SUCCESS) {
result = NULL;
}
/* Unwind all three locks before returning. */
mutex_exit(&sess->ps_mutex);
mutex_exit(&tgt->target_mutex);
PPPT_GLOBAL_UNLOCK();
return (result);
}
mutex_exit(&sess->ps_mutex);
}
mutex_exit(&tgt->target_mutex);
}
PPPT_GLOBAL_UNLOCK();
return (NULL);
}
示例15: _avl_walk_advance
/*
 * Return the walk's current node and advance its saved position one
 * step in the walk's direction (forward for uaw_dir > 0, otherwise
 * backward). Returns NULL once the walk is exhausted.
 */
static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
	void *cur = wp->uaw_next_result;
	avl_tree_t *tree = &ap->ua_tree;

	if (cur == NULL)
		return (NULL);

	if (wp->uaw_dir > 0)
		wp->uaw_next_result = AVL_NEXT(tree, cur);
	else
		wp->uaw_next_result = AVL_PREV(tree, cur);

	return (cur);
}