This article collects typical usage examples of the C++ RB_CLEAR_NODE function, drawn from real-world code. If you have been wondering what exactly RB_CLEAR_NODE does, how to use it, and what calling it looks like in practice, the curated examples below should help.
The following presents 15 code examples of RB_CLEAR_NODE, ordered by popularity by default.
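All fifteen examples share one pattern: clear an rb_node (or interval-tree node) right after allocation, so that RB_EMPTY_NODE() can later distinguish "never linked / already unlinked" from "still in a tree". Before the real-world examples, here is a minimal self-contained sketch of that lifecycle, assuming the Linux <linux/rbtree.h> API; the struct and helper names are illustrative, not taken from any of the examples:
#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
    struct rb_node node;
    unsigned long key;
};

/* Allocate an item whose node is marked "not in any tree". */
static struct item *item_new(unsigned long key)
{
    struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

    if (!it)
        return NULL;
    RB_CLEAR_NODE(&it->node);
    it->key = key;
    return it;
}

/* Idempotent erase: safe even if the item was never inserted. */
static void item_erase(struct rb_root *root, struct item *it)
{
    if (!RB_EMPTY_NODE(&it->node)) {
        rb_erase(&it->node, root);
        RB_CLEAR_NODE(&it->node);
    }
}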
Example 1: calloc
static struct itree_node *itnode_init(unsigned long long uuid, unsigned long inmem)
{
    struct itree_node *itnode = NULL;

    itnode = calloc(1, sizeof(*itnode));
    if (!itnode)
        return NULL;

    RB_CLEAR_NODE(&itnode->inodes_node);
    RB_CLEAR_NODE(&itnode->sorted_node);
    itnode->uuid = uuid;
    itnode->inmem = inmem;

    return itnode;
}
Example 2: kmem_cache_alloc
static struct eam_entry *eamt_create_entry(struct ipv6_prefix *ip6, struct ipv4_prefix *ip4)
{
    struct eam_entry *entry;

    entry = kmem_cache_alloc(entry_cache, GFP_ATOMIC);
    if (!entry)
        return NULL;

    entry->pref4 = *ip4;
    entry->pref6 = *ip6;
    RB_CLEAR_NODE(&entry->tree4_hook);
    RB_CLEAR_NODE(&entry->tree6_hook);

    return entry;
}
Example 3: kzalloc
/**
 * bus1_queue_entry_new() - allocate new queue entry
 * @n_files: number of files to carry
 *
 * This allocates a new queue-entry with pre-allocated space to carry the given
 * number of file descriptors. The queue entry is initially unlinked and no
 * slice is associated with it. The caller is free to modify the files array
 * and the slice as they wish.
 *
 * Return: Pointer to the new entry, ERR_PTR on failure.
 */
struct bus1_queue_entry *bus1_queue_entry_new(size_t n_files)
{
    struct bus1_queue_entry *entry;

    entry = kzalloc(sizeof(*entry) + n_files * sizeof(struct file *),
                    GFP_KERNEL);
    if (!entry)
        return ERR_PTR(-ENOMEM);

    RB_CLEAR_NODE(&entry->transaction.rb);
    RB_CLEAR_NODE(&entry->rb);
    entry->n_files = n_files;

    return entry;
}
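Because the constructor reports failure through ERR_PTR() rather than NULL, callers must test the result with IS_ERR(). A minimal calling sketch; the surrounding caller is hypothetical, only bus1_queue_entry_new() comes from the example above:
struct bus1_queue_entry *entry;

entry = bus1_queue_entry_new(2); /* room for two struct file pointers */
if (IS_ERR(entry))
    return PTR_ERR(entry);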
Example 4: namei_add_inode
static void namei_add_inode(ext2_ino_t ino)
{
    struct target_inode *t, *n;
    struct rb_node **p = &namei_targets.rb_node;
    struct rb_node *parent = NULL;

    n = malloc(sizeof(*n));
    if (!n) {
        fprintf(stderr, "Unable to allocate space for inode\n");
        exit(1);
    }
    RB_CLEAR_NODE(&n->rb_node);
    n->ino = ino;
    n->nlinks = 1;

    while (*p) {
        parent = *p;
        t = rb_entry(parent, struct target_inode, rb_node);

        if (ino < t->ino)
            p = &(*p)->rb_left;
        else if (ino > t->ino)
            p = &(*p)->rb_right;
        else {
            free(n); /* inode already tracked; drop the unused node */
            return;
        }
    }

    rb_link_node(&n->rb_node, parent, p);
    rb_insert_color(&n->rb_node, &namei_targets);
}
Example 5: i915_gem_userptr_init__mmu_notifier
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
    struct i915_mmu_notifier *mn;
    struct i915_mmu_object *mo;

    if (flags & I915_USERPTR_UNSYNCHRONIZED)
        return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

    if (WARN_ON(obj->userptr.mm == NULL))
        return -EINVAL;

    mn = i915_mmu_notifier_find(obj->userptr.mm);
    if (IS_ERR(mn))
        return PTR_ERR(mn);

    mo = kzalloc(sizeof(*mo), GFP_KERNEL);
    if (!mo)
        return -ENOMEM;

    mo->mn = mn;
    mo->obj = obj;
    mo->it.start = obj->userptr.ptr;
    mo->it.last = obj->userptr.ptr + obj->base.size - 1;
    RB_CLEAR_NODE(&mo->it.rb);

    obj->userptr.mmu_object = mo;
    return 0;
}
Example 6: rb_init_node
void rb_init_node(LPRB_NODE rb)
{
    rb->rb_parent_color = 0;
    rb->rb_right = NULL;
    rb->rb_left = NULL;
    RB_CLEAR_NODE(rb);
}
Example 7: bus1_queue_unlink
/**
 * bus1_queue_unlink() - unlink entry from sorted queue
 * @queue: queue to unlink from
 * @entry: entry to unlink, or NULL
 *
 * This unlinks @entry from the message queue @queue. If the entry was already
 * unlinked (or NULL is passed), this is a no-op.
 *
 * The caller must hold the write-side peer-lock of the parent peer.
 *
 * Return: True if the queue became readable with this call. This can happen
 * if you unlink a staging entry, and thus a waiting entry becomes ready.
 */
bool bus1_queue_unlink(struct bus1_queue *queue,
                       struct bus1_queue_entry *entry)
{
    struct rb_node *node;

    if (!entry || RB_EMPTY_NODE(&entry->rb))
        return false;

    node = rcu_dereference_protected(queue->front,
                                     bus1_queue_is_held(queue));
    if (node == &entry->rb) {
        node = rb_next(node);
        if (node && bus1_queue_entry(node)->seq & 1)
            node = NULL;
        rcu_assign_pointer(queue->front, node);
    } else {
        node = NULL;
    }

    rb_erase(&entry->rb, &queue->messages);
    RB_CLEAR_NODE(&entry->rb);

    /*
     * If this entry was non-ready in front, but the next entry exists and
     * is ready, then the queue becomes readable if you pop the front.
     */
    return (entry->seq & 1) && node && !(bus1_queue_entry(node)->seq & 1);
}
Example 8: rb_init_node
static void rb_init_node(struct rb_node *rb)
{
    rb->rb_parent_color = 0;
    rb->rb_right = NULL;
    rb->rb_left = NULL;
    RB_CLEAR_NODE(rb);
}
Example 9: zswap_rb_erase
static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
    if (!RB_EMPTY_NODE(&entry->rbnode)) {
        rb_erase(&entry->rbnode, root);
        RB_CLEAR_NODE(&entry->rbnode);
    }
}
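The erase above is deliberately idempotent: RB_EMPTY_NODE() skips nodes that were never linked or were already erased, and RB_CLEAR_NODE() re-marks the node afterwards so a second call stays a no-op. The matching insert is the other half of the idiom. The sketch below is illustrative, not copied from the zswap source; it assumes zswap_entry carries an offset key:
static void zswap_rb_insert_sketch(struct rb_root *root,
                                   struct zswap_entry *entry)
{
    struct rb_node **link = &root->rb_node, *parent = NULL;
    struct zswap_entry *e;

    while (*link) {
        parent = *link;
        e = rb_entry(parent, struct zswap_entry, rbnode);
        if (entry->offset < e->offset)
            link = &(*link)->rb_left;
        else
            link = &(*link)->rb_right;
    }
    /* rb_link_node() overwrites the cleared state set at allocation. */
    rb_link_node(&entry->rbnode, parent, link);
    rb_insert_color(&entry->rbnode, root);
}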
Example 10: machine__init
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
    map_groups__init(&machine->kmaps);
    RB_CLEAR_NODE(&machine->rb_node);
    INIT_LIST_HEAD(&machine->user_dsos);
    INIT_LIST_HEAD(&machine->kernel_dsos);

    machine->threads = RB_ROOT;
    INIT_LIST_HEAD(&machine->dead_threads);
    machine->last_match = NULL;

    machine->kmaps.machine = machine;
    machine->pid = pid;

    machine->symbol_filter = NULL;
    machine->id_hdr_size = 0;

    machine->root_dir = strdup(root_dir);
    if (machine->root_dir == NULL)
        return -ENOMEM;

    if (pid != HOST_KERNEL_ID) {
        struct thread *thread = machine__findnew_thread(machine, 0, pid);
        char comm[64];

        if (thread == NULL)
            return -ENOMEM;

        snprintf(comm, sizeof(comm), "[guest/%d]", pid);
        thread__set_comm(thread, comm, 0);
    }

    return 0;
}
Example 11: bus1_queue_relink
/**
 * bus1_queue_relink() - change sequence number of an entry
 * @queue: queue to operate on
 * @entry: entry to relink
 * @seq: sequence number to set
 *
 * This changes the sequence number of @entry to @seq. The caller must
 * guarantee that the entry was already linked with an odd-numbered sequence
 * number. This will unlink the entry, change the sequence number and link it
 * again.
 *
 * The caller must hold the write-side peer-lock of the parent peer.
 *
 * Return: True if the queue became readable with this call.
 */
bool bus1_queue_relink(struct bus1_queue *queue,
                       struct bus1_queue_entry *entry,
                       u64 seq)
{
    struct rb_node *front;

    if (WARN_ON(seq == 0 ||
                RB_EMPTY_NODE(&entry->rb) ||
                !(entry->seq & 1)))
        return false;

    bus1_queue_assert_held(queue);

    /* remember front, cannot point to @entry */
    front = rcu_access_pointer(queue->front);
    WARN_ON(front == &entry->rb);

    /* drop from rb-tree and insert again */
    rb_erase(&entry->rb, &queue->messages);
    RB_CLEAR_NODE(&entry->rb);
    bus1_queue_link(queue, entry, seq);

    /* if this uncovered a front, then the queue became readable */
    return !front && rcu_access_pointer(queue->front);
}
Example 12: del_object
static void del_object(struct i915_mmu_object *mo)
{
    if (RB_EMPTY_NODE(&mo->it.rb))
        return;

    interval_tree_remove(&mo->it, &mo->mn->objects);
    RB_CLEAR_NODE(&mo->it.rb);
}
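The insert-side counterpart presumably applies the same guard in reverse, inserting only while the node is still clear. A hedged reconstruction, mirroring the remove call above but not verified against the i915 source:
static void add_object(struct i915_mmu_object *mo)
{
    if (!RB_EMPTY_NODE(&mo->it.rb))
        return; /* already in the interval tree */
    interval_tree_insert(&mo->it, &mo->mn->objects);
}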
Example 13: tsk_fork_get_node
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;
    unsigned long *stackend;
    int node = tsk_fork_get_node(orig);
    int err;

    prepare_to_copy(orig);

    tsk = alloc_task_struct_node(node);
    if (!tsk)
        return NULL;

    ti = alloc_thread_info_node(tsk, node);
    if (!ti) {
        free_task_struct(tsk);
        return NULL;
    }

    err = arch_dup_task_struct(tsk, orig);
    if (err)
        goto out;

    tsk->stack = ti;
    setup_thread_stack(tsk, orig);
    clear_user_return_notifier(tsk);
    clear_tsk_need_resched(tsk);
    stackend = end_of_stack(tsk);
    *stackend = STACK_END_MAGIC; /* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
    tsk->stack_canary = get_random_int();
#endif

    /*
     * One for us, one for whoever does the "release_task()" (usually
     * parent)
     */
    atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
    tsk->btrace_seq = 0;
#endif
    tsk->splice_pipe = NULL;

    account_kernel_stack(ti, 1);

#ifdef CONFIG_ANDROID_LMK_ADJ_RBTREE
    RB_CLEAR_NODE(&tsk->adj_node);
#endif

    return tsk;

out:
    free_thread_info(ti);
    free_task_struct(tsk);
    return NULL;
}
Example 14: copy_signal
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
    struct signal_struct *sig;

    if (clone_flags & CLONE_THREAD)
        return 0;

    sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
    tsk->signal = sig;
    if (!sig)
        return -ENOMEM;

    sig->nr_threads = 1;
    atomic_set(&sig->live, 1);
    atomic_set(&sig->sigcnt, 1);

    /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
    sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
    tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

    init_waitqueue_head(&sig->wait_chldexit);
    if (clone_flags & CLONE_NEWPID)
        sig->flags |= SIGNAL_UNKILLABLE;
    sig->curr_target = tsk;
    init_sigpending(&sig->shared_pending);
    INIT_LIST_HEAD(&sig->posix_timers);

    hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    sig->real_timer.function = it_real_fn;

    task_lock(current->group_leader);
    memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
    task_unlock(current->group_leader);

    posix_cpu_timers_init_group(sig);

    tty_audit_fork(sig);
    sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
    init_rwsem(&sig->group_rwsem);
#endif

    sig->oom_adj = current->signal->oom_adj;
    sig->oom_score_adj = current->signal->oom_score_adj;
    sig->oom_score_adj_min = current->signal->oom_score_adj_min;

#ifdef CONFIG_ANDROID_LMK_ADJ_RBTREE
    RB_CLEAR_NODE(&sig->adj_node);
#endif

    sig->has_child_subreaper = current->signal->has_child_subreaper ||
                               current->signal->is_child_subreaper;

    mutex_init(&sig->cred_guard_mutex);

    return 0;
}
Example 15: ip4_frag_append_to_last_run
/* Append skb to the last "run". */
static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
                                        struct sk_buff *skb)
{
    /* skb joins a run's linked list, not the rbtree itself */
    RB_CLEAR_NODE(&skb->rbnode);
    FRAG_CB(skb)->next_frag = NULL;

    FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
    FRAG_CB(q->fragments_tail)->next_frag = skb;
    q->fragments_tail = skb;
}