This article collects typical usage examples of the radix_tree_lookup function in C++. If you have been wondering what exactly radix_tree_lookup does, or how to use it in practice, the hand-picked code examples below may help.
A total of 15 radix_tree_lookup code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ examples.
Example 1: xfs_iget
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
    xfs_mount_t    *mp,
    xfs_trans_t    *tp,
    xfs_ino_t      ino,
    uint           flags,
    uint           lock_flags,
    xfs_inode_t    **ipp)
{
    xfs_inode_t    *ip;
    int            error;
    xfs_perag_t    *pag;
    xfs_agino_t    agino;

    /* reject inode numbers outside existing AGs */
    if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
        return EINVAL;

    /* get the perag structure and ensure that it's inode capable */
    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
    agino = XFS_INO_TO_AGINO(mp, ino);

again:
    error = 0;
    rcu_read_lock();
    ip = radix_tree_lookup(&pag->pag_ici_root, agino);

    if (ip) {
        error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
        if (error)
            goto out_error_or_again;
    } else {
        rcu_read_unlock();
        XFS_STATS_INC(xs_ig_missed);

        error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                    flags, lock_flags);
        if (error)
            goto out_error_or_again;
    }
    xfs_perag_put(pag);

    *ipp = ip;

    /*
     * If we have a real type for an on-disk inode, we can set ops(&unlock)
     * now.  If it's a new inode being created, xfs_ialloc will handle it.
     */
    if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
        xfs_setup_inode(ip);
    return 0;

out_error_or_again:
    if (error == EAGAIN) {
        delay(1);
        goto again;
    }
    xfs_perag_put(pag);
    return error;
}
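For context, a minimal caller sketch follows. It is not from the source above; it assumes the era-appropriate xfs_iunlock() and IRELE() helpers, and notes that XFS of this vintage returned positive errnos:

/*
 * Hypothetical caller sketch: look up inode number `ino` with a shared
 * inode lock, use it, then drop the lock and the reference.
 */
static int example_use_inode(xfs_mount_t *mp, xfs_ino_t ino)
{
    xfs_inode_t *ip;
    int error;

    error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
    if (error)
        return error;                   /* positive errno in this era */

    /* ... inspect ip->i_d fields under the shared lock ... */

    xfs_iunlock(ip, XFS_ILOCK_SHARED);
    IRELE(ip);                          /* drop the ref taken by xfs_iget */
    return 0;
}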
Example 2: mutex_lock
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
    struct hwspinlock *hwlock;
    int ret;

    mutex_lock(&hwspinlock_tree_lock);

    /* make sure this hwspinlock exists */
    hwlock = radix_tree_lookup(&hwspinlock_tree, id);
    if (!hwlock) {
        pr_warn("hwspinlock %u does not exist\n", id);
        goto out;
    }

    /* sanity check (this shouldn't happen) */
    WARN_ON(hwlock_to_id(hwlock) != id);

    /* make sure this hwspinlock is unused */
    ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
    if (ret == 0) {
        pr_warn("hwspinlock %u is already in use\n", id);
        hwlock = NULL;
        goto out;
    }

    /* mark as used and power up */
    ret = __hwspin_lock_request(hwlock);
    if (ret < 0)
        hwlock = NULL;

out:
    mutex_unlock(&hwspinlock_tree_lock);
    return hwlock;
}
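A usage sketch follows, built from the documented hwspinlock API (hwspin_lock_timeout(), hwspin_unlock(), hwspin_lock_free()); the lock id and timeout are illustrative:

#include <linux/hwspinlock.h>

/*
 * Sketch of typical board-code usage: reserve hwspinlock 0 for IPC with
 * a coprocessor, take it briefly, then release and free it.
 */
static int example_hwspinlock_user(void)
{
    struct hwspinlock *lock;
    int ret;

    lock = hwspin_lock_request_specific(0);
    if (!lock)
        return -EBUSY;

    /* spin for at most 100 ms waiting for the remote side */
    ret = hwspin_lock_timeout(lock, 100);
    if (ret)
        goto free;

    /* ... touch the shared resource ... */

    hwspin_unlock(lock);
free:
    hwspin_lock_free(lock);
    return ret;
}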
Example 3: kzalloc
static struct q_irq_data *qpnpint_alloc_irq_data(
            struct q_chip_data *chip_d,
            unsigned long hwirq)
{
    struct q_irq_data *irq_d;
    struct q_perip_data *per_d;

    irq_d = kzalloc(sizeof(struct q_irq_data), GFP_KERNEL);
    if (!irq_d)
        return ERR_PTR(-ENOMEM);

    /*
     * The Peripheral Tree is keyed from the slave + per_id. We're
     * ignoring the irq bits here since this peripheral structure
     * should be common for all irqs on the same peripheral.
     */
    per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
    if (!per_d) {
        per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
        if (!per_d) {
            kfree(irq_d);
            return ERR_PTR(-ENOMEM);
        }
        radix_tree_insert(&chip_d->per_tree,
                          (hwirq & ~0x7), per_d);
    }
    irq_d->per_d = per_d;
    return irq_d;
}
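Note that radix_tree_insert() can fail with -ENOMEM (its return value is ignored above). The canonical pattern preloads per-CPU nodes in a sleepable context so the insert under a lock cannot fail for lack of memory; a self-contained sketch, with a hypothetical tree and key:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);    /* hypothetical tree */
static DEFINE_SPINLOCK(example_lock);

/*
 * Canonical insert pattern: preload outside the lock, insert under it,
 * then end the preload section.
 */
static int example_insert(unsigned long key, void *item)
{
    int err;

    err = radix_tree_preload(GFP_KERNEL);
    if (err)
        return err;

    spin_lock(&example_lock);
    err = radix_tree_insert(&example_tree, key, item);
    spin_unlock(&example_lock);

    radix_tree_preload_end();
    return err;
}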
Example 4: might_sleep_if
/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp)
{
    struct bdi_writeback *wb;

    might_sleep_if(gfpflags_allow_blocking(gfp));

    if (!memcg_css->parent)
        return &bdi->wb;

    do {
        rcu_read_lock();
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb) {
            struct cgroup_subsys_state *blkcg_css;

            /* see whether the blkcg association has changed */
            blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
                                         &io_cgrp_subsys);
            if (unlikely(wb->blkcg_css != blkcg_css ||
                         !wb_tryget(wb)))
                wb = NULL;
            css_put(blkcg_css);
        }
        rcu_read_unlock();
    } while (!wb && !cgwb_create(bdi, memcg_css, gfp));

    return wb;
}
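A hedged caller sketch, assuming the companion wb_put() helper balances the reference the kernel-doc above says is taken:

/*
 * Hypothetical caller: pin the writeback context for a memcg on this
 * bdi, use it, then drop the reference.
 */
static void example_use_wb(struct backing_dev_info *bdi,
                           struct cgroup_subsys_state *memcg_css)
{
    struct bdi_writeback *wb;

    wb = wb_get_create(bdi, memcg_css, GFP_NOFS);
    if (!wb)
        return;         /* allocation failed inside cgwb_create() */

    /* ... queue writeback work against wb ... */

    wb_put(wb);         /* pairs with the ref taken by wb_get_create() */
}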
Example 5: uvm_pmm_sysmem_mappings_reparent_gpu_mapping
void uvm_pmm_sysmem_mappings_reparent_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings,
                                                  NvU64 dma_addr,
                                                  uvm_va_block_t *va_block)
{
    NvU64 virt_addr;
    uvm_reverse_map_t *reverse_map;
    const NvU64 base_key = dma_addr / PAGE_SIZE;
    uvm_page_index_t new_start_page;

    UVM_ASSERT(PAGE_ALIGNED(dma_addr));
    UVM_ASSERT(va_block);
    UVM_ASSERT(va_block->va_range);

    if (!sysmem_mappings->gpu->access_counters_supported)
        return;

    uvm_spin_lock(&sysmem_mappings->reverse_map_lock);

    reverse_map = radix_tree_lookup(&sysmem_mappings->reverse_map_tree, base_key);
    UVM_ASSERT(reverse_map);

    // Compute virt address by hand since the old VA block may be messed up
    // during split
    virt_addr = reverse_map->va_block->start + reverse_map->region.first * PAGE_SIZE;
    new_start_page = uvm_va_block_cpu_page_index(va_block, virt_addr);

    reverse_map->region = uvm_va_block_region(new_start_page,
                                              new_start_page + uvm_va_block_region_num_pages(reverse_map->region));
    reverse_map->va_block = va_block;

    UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_start(reverse_map)));
    UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_end(reverse_map)));

    uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);
}
Example 6: mlx4_unregister_mac
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
{
    struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
    struct mlx4_mac_table *table = &info->mac_table;
    int index = qpn - info->base_qpn;
    struct mlx4_mac_entry *entry;

    if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
        entry = radix_tree_lookup(&info->mac_tree, qpn);
        if (entry) {
            mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
            radix_tree_delete(&info->mac_tree, qpn);
            index = find_index(dev, table, entry->mac);
            kfree(entry);
        }
    }

    mutex_lock(&table->mutex);

    if (validate_index(dev, table, index))
        goto out;

    /* only free the table entry once its reference count drops to zero */
    if (!(--table->refs[index])) {
        table->entries[index] = 0;
        mlx4_set_port_mac_table(dev, port, table->entries);
        --table->total;
    }
out:
    mutex_unlock(&table->mutex);
}
Example 7: mlx4_unregister_mac
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
{
    struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
    struct mlx4_mac_table *table = &info->mac_table;
    int index = qpn - info->base_qpn;
    struct mlx4_mac_entry *entry;

    if (dev->caps.vep_uc_steering) {
        entry = radix_tree_lookup(&info->mac_tree, qpn);
        if (entry) {
            mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
            radix_tree_delete(&info->mac_tree, qpn);
            index = find_index(dev, table, entry->mac);
            kfree(entry);
        }
    }

    mutex_lock(&table->mutex);

    if (validate_index(dev, table, index))
        goto out;

    table->entries[index] = 0;
    mlx4_set_port_mac_table(dev, port, table->entries);
    --table->total;
out:
    mutex_unlock(&table->mutex);
}
Example 8: F2FS_I_SB
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct extent_tree *et;
    nid_t ino = inode->i_ino;

    down_write(&sbi->extent_tree_lock);
    et = radix_tree_lookup(&sbi->extent_tree_root, ino);
    if (!et) {
        et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
        f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
        memset(et, 0, sizeof(struct extent_tree));
        et->ino = ino;
        et->root = RB_ROOT;
        et->cached_en = NULL;
        rwlock_init(&et->lock);
        atomic_set(&et->refcount, 0);
        et->count = 0;
        sbi->total_ext_tree++;
    }
    atomic_inc(&et->refcount);
    up_write(&sbi->extent_tree_lock);

    /* never dies until evict_inode */
    F2FS_I(inode)->extent_tree = et;
    return et;
}
Example 9: xfs_qm_dqget_cache_lookup
/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
    struct xfs_mount       *mp,
    struct xfs_quotainfo   *qi,
    struct radix_tree_root *tree,
    xfs_dqid_t             id)
{
    struct xfs_dquot *dqp;

restart:
    mutex_lock(&qi->qi_tree_lock);
    dqp = radix_tree_lookup(tree, id);
    if (!dqp) {
        mutex_unlock(&qi->qi_tree_lock);
        XFS_STATS_INC(mp, xs_qm_dqcachemisses);
        return NULL;
    }

    xfs_dqlock(dqp);
    if (dqp->dq_flags & XFS_DQ_FREEING) {
        xfs_dqunlock(dqp);
        mutex_unlock(&qi->qi_tree_lock);
        trace_xfs_dqget_freeing(dqp);
        delay(1);
        goto restart;
    }

    dqp->q_nrefs++;
    mutex_unlock(&qi->qi_tree_lock);

    trace_xfs_dqget_hit(dqp);
    XFS_STATS_INC(mp, xs_qm_dqcachehits);
    return dqp;
}
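In XFS this helper is driven by xfs_qm_dqget(); a simplified, hedged sketch of the caller's shape, where the tree selection is hard-coded to the user-quota tree for illustration and the miss path is elided:

/*
 * Simplified caller shape modelled on xfs_qm_dqget(): try the cache,
 * and fall back to reading the dquot from disk on a miss.
 */
static struct xfs_dquot *example_dqget(struct xfs_mount *mp, xfs_dqid_t id)
{
    struct xfs_quotainfo *qi = mp->m_quotainfo;
    struct xfs_dquot *dqp;

    dqp = xfs_qm_dqget_cache_lookup(mp, qi, &qi->qi_uquota_tree, id);
    if (dqp)
        return dqp;     /* returned xfs_dqlock()ed with a reference */

    /*
     * Miss path in XFS proper: read the dquot from disk, insert it
     * into the radix tree, and retry the lookup if a concurrent
     * inserter won the race.
     */
    return NULL;        /* placeholder for the miss path */
}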
Example 10: lockdep_assert_held
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
    struct io_cq *icq;

    lockdep_assert_held(q->queue_lock);

    /*
     * icq's are indexed from @ioc using radix tree and hint pointer,
     * both of which are protected with RCU.  All removals are done
     * holding both q and ioc locks, and we're holding q lock - if we
     * find an icq which points to us, it's guaranteed to be valid.
     */
    rcu_read_lock();
    icq = rcu_dereference(ioc->icq_hint);
    if (icq && icq->q == q)
        goto out;

    icq = radix_tree_lookup(&ioc->icq_tree, q->id);
    if (icq && icq->q == q)
        rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
    else
        icq = NULL;
out:
    rcu_read_unlock();
    return icq;
}
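A sketch of a caller, modelled on the block layer's lookup-or-create path; the queue lock is held across the lookup, as the kernel-doc above requires:

/*
 * Hypothetical caller: look up the icq for this (ioc, q) pair under
 * the queue lock.
 */
static struct io_cq *example_find_icq(struct io_context *ioc,
                                      struct request_queue *q)
{
    struct io_cq *icq;

    spin_lock_irq(q->queue_lock);
    icq = ioc_lookup_icq(ioc, q);
    spin_unlock_irq(q->queue_lock);

    /* if icq is NULL, callers such as ioc_create_icq() allocate one */
    return icq;
}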
Example 11: radix_tree_lookup
/*
 * The ARM10C comments below are kernel study-group annotations recording
 * the irq numbers observed at each traced call site and the slab object
 * the lookup returned.
 */
// ARM10C 20141122: irq: 16
// ARM10C 20150321: irq: 347
// ARM10C 20150328: irq: 152
// ARM10C 20150509: irq: 152
// ARM10C 20150516: irq: 152
// ARM10C 20150523: irq: 347
struct irq_desc *irq_to_desc(unsigned int irq)
{
    // irq: 16  -> radix_tree_lookup(&irq_desc_tree, 16)  returns kmem_cache#28-oX (irq 16)
    // irq: 347 -> radix_tree_lookup(&irq_desc_tree, 347) returns kmem_cache#28-oX (irq 347)
    return radix_tree_lookup(&irq_desc_tree, irq);
}
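A minimal usage sketch: with SPARSE_IRQ (the radix tree backend shown above), irq_to_desc() returns NULL for unallocated irq numbers, so callers must check the result:

/*
 * Map an irq number to its descriptor and read a field; the field
 * chosen here is illustrative.
 */
static const char *example_irq_name(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    return desc ? desc->name : NULL;
}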
Example 12: xfs_iget
int
xfs_iget(
    xfs_mount_t    *mp,
    xfs_trans_t    *tp,
    xfs_ino_t      ino,
    uint           flags,
    uint           lock_flags,
    xfs_inode_t    **ipp)
{
    xfs_inode_t    *ip;
    int            error;
    xfs_perag_t    *pag;
    xfs_agino_t    agino;

    ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

    if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
        return EINVAL;

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
    agino = XFS_INO_TO_AGINO(mp, ino);

again:
    error = 0;
    rcu_read_lock();
    ip = radix_tree_lookup(&pag->pag_ici_root, agino);

    if (ip) {
        error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
        if (error)
            goto out_error_or_again;
    } else {
        rcu_read_unlock();
        XFS_STATS_INC(xs_ig_missed);

        error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                    flags, lock_flags);
        if (error)
            goto out_error_or_again;
    }
    xfs_perag_put(pag);

    *ipp = ip;

    if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
        xfs_setup_inode(ip);
    return 0;

out_error_or_again:
    if (error == EAGAIN) {
        delay(1);
        goto again;
    }
    xfs_perag_put(pag);
    return error;
}
Example 13: __fscache_check_page_write
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
    void *val;

    rcu_read_lock();
    val = radix_tree_lookup(&cookie->stores, page->index);
    rcu_read_unlock();

    return val != NULL;
}
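A sketch of how a netfs might use this through the public fscache wrappers, assuming the companion helpers fscache_check_page_write() and fscache_wait_on_page_write() from the fscache API:

/*
 * Before releasing a page, check whether fscache still has a store in
 * flight against it and wait if so.
 */
static void example_release_page(struct fscache_cookie *cookie,
                                 struct page *page)
{
    if (fscache_check_page_write(cookie, page))
        fscache_wait_on_page_write(cookie, page);

    /* ... now safe to release the page ... */
}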
Example 14: spin_lock
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
{
    struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
    struct mlx5e_vxlan *vxlan;

    spin_lock(&vxlan_db->lock);
    vxlan = radix_tree_lookup(&vxlan_db->tree, port);
    spin_unlock(&vxlan_db->lock);

    return vxlan;
}
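A hedged sketch of the typical use in the driver's TX offload path: keep tunnel offload features only if the packet's outer UDP destination port is a known VXLAN port. The surrounding header parsing is elided:

/*
 * Illustrative helper, not from the driver: decide whether a parsed
 * outer UDP header targets an offloaded VXLAN port.
 */
static bool example_is_offloaded_vxlan(struct mlx5e_priv *priv,
                                       struct udphdr *udph)
{
    return mlx5e_vxlan_lookup_port(priv, be16_to_cpu(udph->dest)) != NULL;
}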
Example 15: sbd_free_diskmem
/** Free the memory allocated for the disk.
 * @param none
 * @ret   none
 */
void sbd_free_diskmem(void)
{
    int i;
    void *p;

    for (i = 0; i < (SBD_BYTES + PAGE_SIZE - 1) >> PAGE_SHIFT; i++) {
        p = radix_tree_lookup(&sbd_data, i);
        radix_tree_delete(&sbd_data, i);
        free_page((unsigned long)p);
    }
}
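For completeness, a hypothetical allocation counterpart for the same simple block device: back each page-sized chunk of the disk with a zeroed page, index it in the radix tree, and unwind on failure via sbd_free_diskmem() above (which tolerates missing entries, since free_page(0) is a no-op):

static int sbd_alloc_diskmem(void)
{
    int i, err;
    void *p;

    for (i = 0; i < (SBD_BYTES + PAGE_SIZE - 1) >> PAGE_SHIFT; i++) {
        p = (void *)get_zeroed_page(GFP_KERNEL);
        if (!p) {
            err = -ENOMEM;
            goto fail;
        }
        err = radix_tree_insert(&sbd_data, i, p);
        if (err) {
            free_page((unsigned long)p);
            goto fail;
        }
    }
    return 0;

fail:
    sbd_free_diskmem();
    return err;
}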