This article collects typical usage examples of the rb_erase function, the routine in the Linux red-black tree API (<linux/rbtree.h>) that unlinks a node from a tree. If you are wondering what exactly rb_erase does, how to call it, or what real-world code that uses it looks like, the examples gathered here should help.
The 15 code examples below are taken from real C projects (the Linux kernel, perf, fio, and others) and are listed by popularity by default.
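Before the examples, here is a minimal kernel-style sketch of the pattern nearly all of them follow: find the node, unlink it from its tree with rb_erase(), and only then free the structure that contains it. Everything named my_* below (the struct, the tree root, the delete helper) is hypothetical and exists only for illustration; rb_erase(), rb_entry() and RB_ROOT are the real <linux/rbtree.h> API.

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Hypothetical object kept in a hypothetical tree, keyed by 'key'. */
struct my_item {
    struct rb_node node;
    unsigned long key;
};

static struct rb_root my_tree = RB_ROOT;

/* Look up the item with the given key and remove it from the tree. */
static void my_item_delete(unsigned long key)
{
    struct rb_node *n = my_tree.rb_node;

    while (n) {
        struct my_item *it = rb_entry(n, struct my_item, node);

        if (key < it->key) {
            n = n->rb_left;
        } else if (key > it->key) {
            n = n->rb_right;
        } else {
            rb_erase(&it->node, &my_tree);  /* unlink first... */
            kfree(it);                      /* ...then free the container */
            return;
        }
    }
}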
Example 1: delete_prio_tracer
void delete_prio_tracer(pid_t tid)
{
    struct prio_tracer *pt;
    struct dentry *d;
    unsigned long flags;

    spin_lock_irqsave(&pts_lock, flags);
    pt = query_prio_tracer(tid);
    if (!pt) {
        spin_unlock_irqrestore(&pts_lock, flags);
        return;
    }
    d = pt->debugfs_entry;
    spin_unlock_irqrestore(&pts_lock, flags);

    /* debugfs involves a mutex... */
    debugfs_remove(d);

    spin_lock_irqsave(&pts_lock, flags);
    rb_erase(&pt->rb_node, &priority_tracers);
    kfree(pt);
    spin_unlock_irqrestore(&pts_lock, flags);
}
Example 2: ocfs2_extent_map_drop
/*
 * Remove all entries past new_clusters, inclusive of an entry that
 * contains new_clusters.  This is effectively a cache forget.
 *
 * If you want to also clip the last extent by some number of clusters,
 * you need to call ocfs2_extent_map_trunc().
 * This code does not check or modify ip_clusters.
 */
int ocfs2_extent_map_drop(struct inode *inode, u32 new_clusters)
{
    struct rb_node *free_head = NULL;
    struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
    struct ocfs2_extent_map_entry *ent;

    spin_lock(&OCFS2_I(inode)->ip_lock);
    __ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);
    if (ent) {
        rb_erase(&ent->e_node, &em->em_extents);
        ent->e_node.rb_right = free_head;
        free_head = &ent->e_node;
    }
    spin_unlock(&OCFS2_I(inode)->ip_lock);

    if (free_head)
        __ocfs2_extent_map_drop_cleanup(free_head);

    return 0;
}
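Example 2 relies on a small trick worth spelling out: once a node has been removed with rb_erase(), its link pointers are no longer needed by the tree, so the erased entries are threaded through their own rb_right fields into a makeshift singly linked list, and the actual freeing happens only after ip_lock has been dropped. Below is a generic sketch of that deferred-free pattern; my_lock, my_tree and struct my_item (from the sketch above) are hypothetical.

/* Erase everything while holding the lock, free the memory outside it. */
static void my_tree_drop_all(void)
{
    struct rb_node *free_head = NULL;
    struct rb_node *n;

    spin_lock(&my_lock);
    while ((n = rb_first(&my_tree)) != NULL) {
        rb_erase(n, &my_tree);
        /* The node is out of the tree; reuse rb_right as a "next" pointer. */
        n->rb_right = free_head;
        free_head = n;
    }
    spin_unlock(&my_lock);

    /* Free at leisure, without holding the spinlock. */
    while (free_head) {
        struct rb_node *next = free_head->rb_right;

        kfree(rb_entry(free_head, struct my_item, node));
        free_head = next;
    }
}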
Example 3: perf_session__add_hist_entry
static int perf_session__add_hist_entry(struct perf_session *self,
                                        struct addr_location *al, u64 count)
{
    bool hit;
    struct hist_entry *he;

    if (sym_hist_filter != NULL &&
        (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
        /* We're only interested in a symbol named sym_hist_filter */
        if (al->sym != NULL) {
            rb_erase(&al->sym->rb_node,
                     &al->map->dso->symbols[al->map->type]);
            symbol__delete(al->sym);
        }
        return 0;
    }

    he = __perf_session__add_hist_entry(self, al, NULL, count, &hit);
    if (he == NULL)
        return -ENOMEM;

    return annotate__hist_hit(he, al->addr);
}
Example 4: afs_break_callback
/*
 * actually break a callback
 */
static void afs_break_callback(struct afs_server *server,
                               struct afs_vnode *vnode)
{
    _enter("");

    set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

    if (vnode->cb_promised) {
        spin_lock(&vnode->lock);

        _debug("break callback");

        spin_lock(&server->cb_lock);
        if (vnode->cb_promised) {
            rb_erase(&vnode->cb_promise, &server->cb_promises);
            vnode->cb_promised = false;
        }
        spin_unlock(&server->cb_lock);

        queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
        spin_unlock(&vnode->lock);
    }
}
Example 5: release_group
/* release group, return 1 if this was the last release and the group is
 * destroyed; the timeout work is canceled synchronously */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
    struct mlx4_ib_demux_ctx *ctx = group->demux;
    int nzgroup;

    mutex_lock(&ctx->mcg_table_lock);
    mutex_lock(&group->lock);
    if (atomic_dec_and_test(&group->refcount)) {
        if (!from_timeout_handler) {
            if (group->state != MCAST_IDLE &&
                !cancel_delayed_work(&group->timeout_work)) {
                atomic_inc(&group->refcount);
                mutex_unlock(&group->lock);
                mutex_unlock(&ctx->mcg_table_lock);
                return 0;
            }
        }

        nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
        if (nzgroup)
            del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
        if (!list_empty(&group->pending_list))
            mcg_warn_group(group, "releasing a group with non empty pending list\n");
        if (nzgroup)
            rb_erase(&group->node, &ctx->mcg_table);
        list_del_init(&group->mgid0_list);
        mutex_unlock(&group->lock);
        mutex_unlock(&ctx->mcg_table_lock);
        kfree(group);
        return 1;
    } else {
        mutex_unlock(&group->lock);
        mutex_unlock(&ctx->mcg_table_lock);
    }

    return 0;
}
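The subtle part of example 5 is the race against the delayed work: if the reference count drops to zero but cancel_delayed_work() returns 0, the timeout handler is already running, so the function gives the reference back and lets that handler perform the final teardown. Below is a stripped-down sketch of the same idea; struct my_obj, its fields and my_tree are hypothetical, and the per-context table lock of the original is omitted.

struct my_obj {
    struct rb_node node;
    struct mutex lock;
    atomic_t refcount;
    struct delayed_work timeout_work;
};

/* Returns 1 if this call destroyed the object, 0 otherwise. */
static int my_obj_put(struct my_obj *obj, bool from_timeout_handler)
{
    mutex_lock(&obj->lock);
    if (atomic_dec_and_test(&obj->refcount)) {
        /*
         * If the timeout handler is already running it cannot be
         * canceled; give the reference back and let it finish the job.
         */
        if (!from_timeout_handler &&
            !cancel_delayed_work(&obj->timeout_work)) {
            atomic_inc(&obj->refcount);
            mutex_unlock(&obj->lock);
            return 0;
        }
        rb_erase(&obj->node, &my_tree);
        mutex_unlock(&obj->lock);
        kfree(obj);
        return 1;
    }
    mutex_unlock(&obj->lock);
    return 0;
}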
Example 6: unregister_event
void unregister_event(int fd)
{
    int ret;
    struct event_info *ei;

    ei = lookup_event(fd);
    if (!ei)
        return;

    ret = epoll_ctl(efd, EPOLL_CTL_DEL, fd, NULL);
    if (ret)
        sd_err("failed to delete epoll event for fd %d: %m", fd);

    rb_erase(&ei->rb, &events_tree);
    free(ei);

    /*
     * Although ei is no longer a valid pointer, ei->handler() might be about
     * to be called in do_event_loop().  Refreshing the event loop is safe.
     */
    event_force_refresh();

    tracepoint(event, unregister, fd);
}
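The comment in example 6 hints at a use-after-free hazard: epoll_wait() has already handed the event loop a batch of events whose data may point at the event_info that was just freed. The usual cure, sketched below with hypothetical names (this is not the project's actual implementation), is a refresh flag that makes the loop discard the rest of the current batch and ask the kernel again:

#include <stdbool.h>
#include <sys/epoll.h>

#define MAX_EVENTS 128

static int efd;              /* epoll instance, assumed already set up   */
static bool need_refresh;    /* set by the "force refresh" helper        */

static void my_event_force_refresh(void)
{
    need_refresh = true;
}

static void my_event_loop(void)
{
    struct epoll_event events[MAX_EVENTS];

    for (;;) {
        int i, nr = epoll_wait(efd, events, MAX_EVENTS, -1);

        for (i = 0; i < nr; i++) {
            /*
             * If a handler earlier in this batch unregistered an event,
             * its event_info may already be freed: drop the rest of the
             * batch and re-fetch from the kernel.
             */
            if (need_refresh) {
                need_refresh = false;
                break;
            }
            /* ... look up the handler for events[i].data and call it ... */
        }
    }
}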
Example 7: afs_do_give_up_callback
/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 */
static void afs_do_give_up_callback(struct afs_server *server,
                                    struct afs_vnode *vnode)
{
    struct afs_callback *cb;

    _enter("%p,%p", server, vnode);

    cb = &server->cb_break[server->cb_break_head];
    cb->fid = vnode->fid;
    cb->version = vnode->cb_version;
    cb->expiry = vnode->cb_expiry;
    cb->type = vnode->cb_type;
    smp_wmb();
    server->cb_break_head =
        (server->cb_break_head + 1) &
        (ARRAY_SIZE(server->cb_break) - 1);

    /* defer the breaking of callbacks to try and collect as many as
     * possible to ship in one operation */
    switch (atomic_inc_return(&server->cb_break_n)) {
    case 1 ... AFSCBMAX - 1:
        queue_delayed_work(afs_callback_update_worker,
                           &server->cb_break_work, HZ * 2);
        break;
    case AFSCBMAX:
        afs_flush_callback_breaks(server);
        break;
    default:
        break;
    }

    ASSERT(server->cb_promises.rb_node != NULL);
    rb_erase(&vnode->cb_promise, &server->cb_promises);
    vnode->cb_promised = false;

    _leave("");
}
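Example 7 batches the callbacks it wants to give up: each one is copied into a slot of a power-of-two ring buffer (hence the "& (ARRAY_SIZE(server->cb_break) - 1)" wrap), and the actual network operation is deferred with delayed work unless the ring fills up. Here is a generic sketch of that batch-and-defer pattern; struct my_request, my_batch_ship_now(), my_wq and the sizes are all hypothetical.

#define MY_BATCH_MAX 64                 /* must be a power of two */

struct my_batcher {
    struct my_request ring[MY_BATCH_MAX];
    unsigned int head;                  /* next free slot */
    atomic_t count;                     /* entries waiting to be shipped */
    struct delayed_work ship_work;
};

/* Caller holds the lock protecting 'b'. */
static void my_batch_add(struct my_batcher *b, const struct my_request *req)
{
    b->ring[b->head] = *req;
    b->head = (b->head + 1) & (MY_BATCH_MAX - 1);   /* cheap wrap-around */

    /* Ship lazily to collect several requests per operation, eagerly when full. */
    if (atomic_inc_return(&b->count) == MY_BATCH_MAX)
        my_batch_ship_now(b);
    else
        queue_delayed_work(my_wq, &b->ship_work, HZ * 2);
}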
Example 8: id_map_ent_timeout
static void id_map_ent_timeout(struct work_struct *work)
{
    struct delayed_work *delay = to_delayed_work(work);
    struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
    struct id_map_entry *db_ent, *found_ent;
    struct mlx4_ib_dev *dev = ent->dev;
    struct mlx4_ib_sriov *sriov = &dev->sriov;
    struct rb_root *sl_id_map = &sriov->sl_id_map;
    int pv_id = (int) ent->pv_cm_id;

    spin_lock(&sriov->id_map_lock);
    db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
    if (!db_ent)
        goto out;
    found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
    if (found_ent && found_ent == ent)
        rb_erase(&found_ent->node, sl_id_map);
    idr_remove(&sriov->pv_id_table, pv_id);

out:
    list_del(&ent->list);
    spin_unlock(&sriov->id_map_lock);
    kfree(ent);
}
Example 9: try_merge_map
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
    struct extent_map *merge = NULL;
    struct rb_node *rb;

    if (em->start != 0) {
        rb = rb_prev(&em->rb_node);
        if (rb)
            merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(merge, em)) {
            em->start = merge->start;
            em->orig_start = merge->orig_start;
            em->len += merge->len;
            em->block_len += merge->block_len;
            em->block_start = merge->block_start;
            merge->in_tree = 0;
            em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
            em->mod_start = merge->mod_start;
            em->generation = max(em->generation, merge->generation);

            rb_erase(&merge->rb_node, &tree->map);
            free_extent_map(merge);
        }
    }
Example 10: uv_teardown_irq
void uv_teardown_irq(unsigned int irq)
{
    struct uv_irq_2_mmr_pnode *e;
    struct rb_node *n;
    unsigned long irqflags;

    spin_lock_irqsave(&uv_irq_lock, irqflags);
    n = uv_irq_root.rb_node;
    while (n) {
        e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
        if (e->irq == irq) {
            arch_disable_uv_irq(e->pnode, e->offset);
            rb_erase(n, &uv_irq_root);
            kfree(e);
            break;
        }
        if (irq < e->irq)
            n = n->rb_left;
        else
            n = n->rb_right;
    }
    spin_unlock_irqrestore(&uv_irq_lock, irqflags);
    destroy_irq(irq);
}
Example 11: add_extent_mapping
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree: tree to insert new map in
 * @em:   map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em)
{
    int ret = 0;
    struct extent_map *merge = NULL;
    struct rb_node *rb;
    struct extent_map *exist;

    exist = lookup_extent_mapping(tree, em->start, em->len);
    if (exist) {
        free_extent_map(exist);
        ret = -EEXIST;
        goto out;
    }
    assert_spin_locked(&tree->lock);
    rb = tree_insert(&tree->map, em->start, &em->rb_node);
    if (rb) {
        ret = -EEXIST;
        free_extent_map(merge);
        goto out;
    }
    atomic_inc(&em->refs);
    if (em->start != 0) {
        rb = rb_prev(&em->rb_node);
        if (rb)
            merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(merge, em)) {
            em->start = merge->start;
            em->len += merge->len;
            em->block_len += merge->block_len;
            em->block_start = merge->block_start;
            merge->in_tree = 0;
            rb_erase(&merge->rb_node, &tree->map);
            free_extent_map(merge);
        }
    }
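Examples 9 and 11 both use rb_erase() for coalescing: after inserting a mapping, look at its in-order predecessor with rb_prev(), and if the two ranges are contiguous and compatible, fold the neighbor into the new entry and erase the neighbor from the tree. Below is a simplified kernel-style sketch of that backward merge, with a hypothetical struct my_extent keyed by start offset.

struct my_extent {
    struct rb_node node;
    u64 start;
    u64 len;
};

/* Try to absorb the in-order predecessor of 'e' if the ranges touch. */
static void my_extent_merge_prev(struct rb_root *root, struct my_extent *e)
{
    struct rb_node *rb = rb_prev(&e->node);
    struct my_extent *prev;

    if (!rb)
        return;
    prev = rb_entry(rb, struct my_extent, node);

    if (prev->start + prev->len == e->start) {
        /* Grow the new entry backwards over its neighbor... */
        e->start = prev->start;
        e->len += prev->len;
        /* ...and drop the now-redundant node from the tree. */
        rb_erase(&prev->node, root);
        kfree(prev);
    }
}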
Example 12: diff__process_sample_event
static int diff__process_sample_event(event_t *event, struct perf_session *session)
{
    struct addr_location al;
    struct sample_data data = { .period = 1, };

    if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
        pr_warning("problem processing %d event, skipping it.\n",
                   event->header.type);
        return -1;
    }

    if (al.filtered || al.sym == NULL)
        return 0;

    if (hists__add_entry(&session->hists, &al, data.period)) {
        pr_warning("problem incrementing symbol period, skipping event\n");
        return -1;
    }

    session->hists.stats.total_period += data.period;
    return 0;
}

static struct perf_event_ops event_ops = {
    .sample = diff__process_sample_event,
    .mmap   = event__process_mmap,
    .comm   = event__process_comm,
    .exit   = event__process_task,
    .fork   = event__process_task,
    .lost   = event__process_lost,
};

static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
                                                    struct hist_entry *he)
{
    struct rb_node **p = &root->rb_node;
    struct rb_node *parent = NULL;
    struct hist_entry *iter;

    while (*p != NULL) {
        parent = *p;
        iter = rb_entry(parent, struct hist_entry, rb_node);
        if (hist_entry__cmp(he, iter) < 0)
            p = &(*p)->rb_left;
        else
            p = &(*p)->rb_right;
    }

    rb_link_node(&he->rb_node, parent, p);
    rb_insert_color(&he->rb_node, root);
}

static void hists__resort_entries(struct hists *self)
{
    unsigned long position = 1;
    struct rb_root tmp = RB_ROOT;
    struct rb_node *next = rb_first(&self->entries);

    while (next != NULL) {
        struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);

        next = rb_next(&n->rb_node);
        rb_erase(&n->rb_node, &self->entries);
        n->position = position++;
        perf_session__insert_hist_entry_by_name(&tmp, n);
    }

    self->entries = tmp;
}
Example 13: rb_erase_init
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
    rb_erase(n, root);
    RB_CLEAR_NODE(n);
}
Developer: Minia89; Project: Note-3-AEL-Kernel-SM-N9005_EUR_LL_Opensource_Update2.; Lines: 5; Source file: fiops-iosched.c
Example 14: remove_attr
static void remove_attr(struct rb_root *root, struct ib_sa_attr_list *attr_list)
{
    rb_erase(&attr_list->node, root);
    free_attr_list(attr_list);
    kfree(attr_list);
}
Example 15: fill_sha512
//......... part of the code is omitted here .........
        log_err("fio: bad verify type: %d\n", td->o.verify);
        assert(0);
    }

    if (td->o.verify_offset)
        memswp(p, p + td->o.verify_offset, hdr_size(hdr));
}

/*
 * fill body of io_u->buf with random data and add a header with the
 * checksum of choice
 */
void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
{
    if (td->o.verify == VERIFY_NULL)
        return;

    fill_pattern_headers(td, io_u, 0, 0);
}

int get_next_verify(struct thread_data *td, struct io_u *io_u)
{
    struct io_piece *ipo = NULL;

    /*
     * this io_u is from a requeue, we already filled the offsets
     */
    if (io_u->file)
        return 0;

    if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
        struct rb_node *n = rb_first(&td->io_hist_tree);

        ipo = rb_entry(n, struct io_piece, rb_node);
        rb_erase(n, &td->io_hist_tree);
        assert(ipo->flags & IP_F_ONRB);
        ipo->flags &= ~IP_F_ONRB;
    } else if (!flist_empty(&td->io_hist_list)) {
        ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
        flist_del(&ipo->list);
        assert(ipo->flags & IP_F_ONLIST);
        ipo->flags &= ~IP_F_ONLIST;
    }

    if (ipo) {
        td->io_hist_len--;

        io_u->offset = ipo->offset;
        io_u->buflen = ipo->len;
        io_u->file = ipo->file;
        io_u->flags |= IO_U_F_VER_LIST;

        if (ipo->flags & IP_F_TRIMMED)
            io_u->flags |= IO_U_F_TRIMMED;

        if (!fio_file_open(io_u->file)) {
            int r = td_io_open_file(td, io_u->file);

            if (r) {
                dprint(FD_VERIFY, "failed file %s open\n",
                       io_u->file->file_name);
                return 1;
            }
        }

        get_file(ipo->file);
        assert(fio_file_open(io_u->file));
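To round off example 15: the comment above populate_verify_io_u() describes the write side of verification, i.e. fill the buffer body with reproducible random data and prepend a header carrying a checksum of that body, so a later read can recompute and compare it. The toy program below only illustrates that layout idea; the struct, the additive checksum and every name are made up for this sketch (fio's real verify headers support md5, crc32c, sha1, sha256 and more).

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature verify header; fio's real one carries more fields. */
struct toy_verify_hdr {
    uint32_t magic;
    uint32_t csum;
};

static uint32_t toy_csum(const unsigned char *p, size_t len)
{
    uint32_t sum = 0;

    while (len--)
        sum = sum * 31 + *p++;   /* stand-in for md5/crc32/sha* */
    return sum;
}

/* Fill buf with a random payload and prepend a checksum header. */
static void toy_populate_verify(unsigned char *buf, size_t len, unsigned int seed)
{
    struct toy_verify_hdr hdr = { .magic = 0xf10f10f1 };
    size_t i;

    if (len < sizeof(hdr))
        return;

    srand(seed);                         /* reproducible payload */
    for (i = sizeof(hdr); i < len; i++)
        buf[i] = (unsigned char)rand();

    hdr.csum = toy_csum(buf + sizeof(hdr), len - sizeof(hdr));
    memcpy(buf, &hdr, sizeof(hdr));
}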