This page collects typical usage examples of the C++ function radix_tree_preload. If you are wondering what radix_tree_preload does, how to use it, or are looking for working examples, the hand-picked code samples below may help.
A total of 15 radix_tree_preload code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
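Before the individual examples, here is a minimal sketch of the pattern they all share: radix_tree_preload() allocates the needed radix-tree nodes up front, in a context that is still allowed to sleep, and returns with preemption disabled on success; radix_tree_insert() can then run under a spinlock without allocating; radix_tree_preload_end() re-enables preemption. The tree, lock and GFP flags below are placeholders rather than code taken from any one example.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static int example_preload_insert(struct radix_tree_root *tree, spinlock_t *lock,
                                  unsigned long index, void *item)
{
    int err;

    /* Reserve enough nodes for a single insertion; may sleep with GFP_KERNEL. */
    err = radix_tree_preload(GFP_KERNEL);
    if (err)
        return err;

    /* Preemption is now disabled, so the insert cannot fail with -ENOMEM. */
    spin_lock(lock);
    err = radix_tree_insert(tree, index, item);    /* may still return -EEXIST */
    spin_unlock(lock);

    /* Re-enable preemption whether or not the insert succeeded. */
    radix_tree_preload_end();
    return err;
}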
Example 1: nfs_inode_add_request
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
    struct nfs_inode *nfsi = NFS_I(inode);
    int error;

    error = radix_tree_preload(GFP_NOFS);
    if (error != 0)
        goto out;

    /* Lock the request! */
    nfs_lock_request_dontget(req);

    spin_lock(&inode->i_lock);
    error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
    BUG_ON(error);
    if (!nfsi->npages) {
        igrab(inode);
        if (nfs_have_delegation(inode, FMODE_WRITE))
            nfsi->change_attr++;
    }
    SetPagePrivate(req->wb_page);
    set_page_private(req->wb_page, (unsigned long)req);
    nfsi->npages++;
    kref_get(&req->wb_kref);
    radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
                       NFS_PAGE_TAG_LOCKED);
    spin_unlock(&inode->i_lock);
    radix_tree_preload_end();
out:
    return error;
}
Example 2: add_to_swap_cache
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
    int error;

    VM_BUG_ON(!PageLocked(page));
    VM_BUG_ON(PageSwapCache(page));
    VM_BUG_ON(!PageSwapBacked(page));

    error = radix_tree_preload(gfp_mask);
    if (!error) {
        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        spin_lock_irq(&swapper_space.tree_lock);
        error = radix_tree_insert(&swapper_space.page_tree,
                                  entry.val, page);
        if (likely(!error)) {
            total_swapcache_pages++;
            __inc_zone_page_state(page, NR_FILE_PAGES);
            INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&swapper_space.tree_lock);
        radix_tree_preload_end();

        if (unlikely(error)) {
            set_page_private(page, 0UL);
            ClearPageSwapCache(page);
            page_cache_release(page);
        }
    }
    return error;
}
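The preload is only needed on the insertion side: radix_tree_delete() frees nodes rather than allocating them, so removal can run directly under the tree lock with no preload at all. A hedged sketch of the inverse operation, loosely modelled on the kernel's __delete_from_swap_cache() of the same era (illustrative, not verbatim; swap-count bookkeeping via swapcache_free() is omitted, and the caller is assumed to hold the page lock):

static void example_delete_from_swap_cache(struct page *page)
{
    swp_entry_t entry = { .val = page_private(page) };

    spin_lock_irq(&swapper_space.tree_lock);
    radix_tree_delete(&swapper_space.page_tree, entry.val);    /* no preload needed */
    set_page_private(page, 0);
    ClearPageSwapCache(page);
    total_swapcache_pages--;
    __dec_zone_page_state(page, NR_FILE_PAGES);
    spin_unlock_irq(&swapper_space.tree_lock);

    page_cache_release(page);    /* drop the reference taken by add_to_swap_cache() */
}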
Example 3: __free_extent
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                         *root, u64 blocknr, u64 num_blocks, int pin)
{
    struct btrfs_path path;
    struct btrfs_key key;
    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_root *extent_root = info->extent_root;
    int ret;
    struct btrfs_extent_item *ei;
    struct btrfs_key ins;
    u32 refs;

    BUG_ON(pin && num_blocks != 1);
    key.objectid = blocknr;
    key.flags = 0;
    btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
    key.offset = num_blocks;

    find_free_extent(trans, root, 0, 0, (u64)-1, &ins);
    btrfs_init_path(&path);
    ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
    if (ret) {
        btrfs_print_tree(extent_root, extent_root->node);
        printf("failed to find %llu\n", (u64)key.objectid);
        BUG();
    }
    ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
                        struct btrfs_extent_item);
    BUG_ON(ei->refs == 0);
    refs = btrfs_extent_refs(ei) - 1;
    btrfs_set_extent_refs(ei, refs);
    if (refs == 0) {
        u64 super_blocks_used;

        if (pin) {
            int err;
            unsigned long bl = blocknr;

            radix_tree_preload(GFP_KERNEL);
            err = radix_tree_insert(&info->pinned_radix,
                                    blocknr, (void *)bl);
            BUG_ON(err);
            radix_tree_preload_end();
        }
        super_blocks_used = btrfs_super_blocks_used(info->disk_super);
        btrfs_set_super_blocks_used(info->disk_super,
                                    super_blocks_used - num_blocks);
        ret = btrfs_del_item(trans, extent_root, &path);
        if (!pin && extent_root->fs_info->last_insert.objectid > blocknr)
            extent_root->fs_info->last_insert.objectid = blocknr;
        if (ret)
            BUG();
        ret = update_block_group(trans, root, blocknr, num_blocks, 0);
        BUG_ON(ret);
    }
    btrfs_release_path(extent_root, &path);
    finish_current_insert(trans, extent_root);
    return ret;
}
Example 4: nilfs_btnode_prepare_change_key
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
    struct buffer_head *obh, *nbh;
    struct inode *inode = NILFS_BTNC_I(btnc);
    __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
    int err;

    if (oldkey == newkey)
        return 0;

    obh = ctxt->bh;
    ctxt->newbh = NULL;

    if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
        lock_page(obh->b_page);
retry:
        err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (err)
            goto failed_unlock;

        if (unlikely(oldkey != obh->b_page->index))
            NILFS_PAGE_BUG(obh->b_page,
                           "invalid oldkey %lld (newkey=%lld)",
                           (unsigned long long)oldkey,
                           (unsigned long long)newkey);

        spin_lock_irq(&btnc->tree_lock);
        err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
        spin_unlock_irq(&btnc->tree_lock);
        radix_tree_preload_end();

        if (!err)
            return 0;
        else if (err != -EEXIST)
            goto failed_unlock;

        err = invalidate_inode_pages2_range(btnc, newkey, newkey);
        if (!err)
            goto retry;

        unlock_page(obh->b_page);
    }
    nbh = nilfs_btnode_create_block(btnc, newkey);
    if (!nbh)
        return -ENOMEM;
    BUG_ON(nbh == obh);
    ctxt->newbh = nbh;
    return 0;

failed_unlock:
    unlock_page(obh->b_page);
    return err;
}
Example 5: add_to_swap_cache
/**
 * add_to_swap_cache: insert @page into the swap cache
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
    int error;

    error = radix_tree_preload(gfp_mask);
    if (!error) {
        error = __add_to_swap_cache(page, entry);
        radix_tree_preload_end();
    }
    return error;
}
Example 6: insert_cursor
/**
 * insert_cursor - allocate file_fsdata, insert cursor to tree and hash table
 * @cursor:
 * @file:
 * @inode:
 *
 * Allocates reiser4_file_fsdata, attaches it to @cursor, inserts cursor to
 * reiser4 super block's hash table and radix tree, and adds detachable
 * readdir state to @file.
 */
static int insert_cursor(dir_cursor *cursor, struct file *file,
                         struct inode *inode)
{
    int result;
    reiser4_file_fsdata *fsdata;

    memset(cursor, 0, sizeof *cursor);

    /* this is either first call to readdir, or rewind. Anyway, create new
     * cursor. */
    fsdata = create_fsdata(NULL);
    if (fsdata != NULL) {
        result = radix_tree_preload(reiser4_ctx_gfp_mask_get());
        if (result == 0) {
            struct d_cursor_info *info;
            oid_t oid;

            info = d_info(inode);
            oid = get_inode_oid(inode);
            /* cid occupies higher 12 bits of f->f_pos. Don't
             * allow it to become negative: this confuses
             * nfsd_readdir() */
            cursor->key.cid = (++cid_counter) & 0x7ff;
            cursor->key.oid = oid;
            cursor->fsdata = fsdata;
            cursor->info = info;
            cursor->ref = 1;

            spin_lock_inode(inode);
            /* install cursor as @f's private_data, discarding old
             * one if necessary */
#if REISER4_DEBUG
            if (file->private_data)
                warning("", "file has fsdata already");
#endif
            clean_fsdata(file);
            free_file_fsdata_nolock(file);
            file->private_data = fsdata;
            fsdata->cursor = cursor;
            spin_unlock_inode(inode);

            spin_lock(&d_lock);
            /* insert cursor into hash table */
            d_cursor_hash_insert(&info->table, cursor);
            /* and chain it into radix-tree */
            bind_cursor(cursor, (unsigned long)oid);
            spin_unlock(&d_lock);

            radix_tree_preload_end();
            file->f_pos = ((__u64) cursor->key.cid) << CID_SHIFT;
        }
    } else
        result = RETERR(-ENOMEM);
    return result;
}
Example 7: find_get_page
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma, unsigned long addr)
{
    struct page *found_page, *new_page = NULL;
    int err;

    do {
        found_page = find_get_page(&swapper_space, entry.val);
        if (found_page)
            break;

        if (!new_page) {
            new_page = alloc_page_vma(gfp_mask, vma, addr);
            if (!new_page)
                break;
        }

        err = radix_tree_preload(gfp_mask & GFP_KERNEL);
        if (err)
            break;

        err = swapcache_prepare(entry);
        if (err == -EEXIST) {
            radix_tree_preload_end();
            continue;
        }
        if (err) {
            radix_tree_preload_end();
            break;
        }

        __set_page_locked(new_page);
        SetPageSwapBacked(new_page);
        err = __add_to_swap_cache(new_page, entry);
        if (likely(!err)) {
            radix_tree_preload_end();
            lru_cache_add_anon(new_page);
            swap_readpage(new_page);
            return new_page;
        }
        radix_tree_preload_end();
        ClearPageSwapBacked(new_page);
        __clear_page_locked(new_page);
        swapcache_free(entry, NULL);
    } while (err != -ENOMEM);

    if (new_page)
        page_cache_release(new_page);
    return found_page;
}
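The gfp_mask & GFP_KERNEL in the preload call above is easy to miss: it keeps only the reclaim-related bits of the caller's allocation mask and drops zone modifiers such as __GFP_HIGHMEM and __GFP_MOVABLE, which make no sense for the slab-allocated radix-tree nodes. A small illustration, assuming the flag definitions of kernels from this era (indicative values, not authoritative):

/*
 * With ~2.6.3x-era definitions:
 *   GFP_KERNEL           == __GFP_WAIT | __GFP_IO | __GFP_FS
 *   GFP_HIGHUSER_MOVABLE == GFP_KERNEL | __GFP_HARDWALL | __GFP_HIGHMEM | __GFP_MOVABLE
 * so a typical caller mask collapses to plain GFP_KERNEL before preloading:
 */
err = radix_tree_preload(GFP_HIGHUSER_MOVABLE & GFP_KERNEL);    /* == radix_tree_preload(GFP_KERNEL) */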
Example 8: create_io_context
/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking %current->io_context and @q exists. If either
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
    struct elevator_type *et = q->elevator->type;
    struct io_context *ioc;
    struct io_cq *icq;

    /* allocate stuff */
    ioc = create_io_context(current, gfp_mask, q->node);
    if (!ioc)
        return NULL;

    icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
                                q->node);
    if (!icq)
        return NULL;

    if (radix_tree_preload(gfp_mask) < 0) {
        kmem_cache_free(et->icq_cache, icq);
        return NULL;
    }

    icq->ioc = ioc;
    icq->q = q;
    INIT_LIST_HEAD(&icq->q_node);
    INIT_HLIST_NODE(&icq->ioc_node);

    /* lock both q and ioc and try to link @icq */
    spin_lock_irq(q->queue_lock);
    spin_lock(&ioc->lock);

    if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
        hlist_add_head(&icq->ioc_node, &ioc->icq_list);
        list_add(&icq->q_node, &q->icq_list);
        if (et->ops.elevator_init_icq_fn)
            et->ops.elevator_init_icq_fn(icq);
    } else {
        kmem_cache_free(et->icq_cache, icq);
        icq = ioc_lookup_icq(ioc, q);
        if (!icq)
            printk(KERN_ERR "cfq: icq link failed!\n");
    }

    spin_unlock(&ioc->lock);
    spin_unlock_irq(q->queue_lock);
    radix_tree_preload_end();
    return icq;
}
Example 9: kzalloc
static struct q_irq_data *qpnpint_alloc_irq_data(
                struct q_chip_data *chip_d,
                unsigned long hwirq)
{
    struct q_irq_data *irq_d;
    struct q_perip_data *per_d;
    int rc;

    irq_d = kzalloc(sizeof(struct q_irq_data), GFP_KERNEL);
    if (!irq_d)
        return ERR_PTR(-ENOMEM);

    /**
     * The Peripheral Tree is keyed from the slave + per_id. We're
     * ignoring the irq bits here since this peripheral structure
     * should be common for all irqs on the same peripheral.
     */
    per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
    if (!per_d) {
        per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
        if (!per_d) {
            rc = -ENOMEM;
            goto alloc_fail;
        }
        spin_lock_init(&per_d->lock);

        rc = radix_tree_preload(GFP_KERNEL);
        if (rc)
            goto alloc_fail;
        rc = radix_tree_insert(&chip_d->per_tree,
                               (hwirq & ~0x7), per_d);
        if (rc)
            goto alloc_fail;
        radix_tree_preload_end();
    }
    irq_d->per_d = per_d;
    return irq_d;

alloc_fail:
    kfree(per_d);
    kfree(irq_d);
    return ERR_PTR(rc);
}
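One detail worth noting in this example: radix_tree_preload_end() is only reached on the success path. If the preload succeeds but radix_tree_insert() then fails, the goto alloc_fail leaves preemption disabled. A hedged sketch of a more defensive ordering, reusing the names above purely for illustration:

        rc = radix_tree_preload(GFP_KERNEL);
        if (rc)
            goto alloc_fail;
        rc = radix_tree_insert(&chip_d->per_tree, (hwirq & ~0x7), per_d);
        radix_tree_preload_end();    /* balance the preload on every outcome */
        if (rc)
            goto alloc_fail;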
Example 10: find_get_page
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma, unsigned long addr)
{
    struct page *found_page, *new_page = NULL;
    int err;

    do {
        /*
         * First check the swap cache. Since this is normally
         * called after lookup_swap_cache() failed, re-calling
         * that would confuse statistics.
         */
        found_page = find_get_page(&swapper_space, entry.val);
        if (found_page)
            break;

        /*
         * Get a new page to read into from swap.
         */
        if (!new_page) {
            new_page = alloc_page_vma(gfp_mask, vma, addr);
            if (!new_page)
                break;              /* Out of memory */
        }

        /*
         * call radix_tree_preload() while we can wait.
         */
        err = radix_tree_preload(gfp_mask & GFP_KERNEL);
        if (err)
            break;

        /*
         * Swap entry may have been freed since our caller observed it.
         */
        err = swapcache_prepare(entry);
        if (err == -EEXIST) {       /* seems racy */
            radix_tree_preload_end();
            continue;
        }
        if (err) {                  /* swp entry is obsolete ? */
            radix_tree_preload_end();
            break;
        }

        /* May fail (-ENOMEM) if radix-tree node allocation failed. */
        __set_page_locked(new_page);
        SetPageSwapBacked(new_page);
        err = __add_to_swap_cache(new_page, entry);
        if (likely(!err)) {
            radix_tree_preload_end();
            /*
             * Initiate read into locked page and return.
             */
            lru_cache_add_anon(new_page);
            swap_readpage(new_page);
            return new_page;
        }
        radix_tree_preload_end();
        ClearPageSwapBacked(new_page);
        __clear_page_locked(new_page);
        /*
         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
         * clear SWAP_HAS_CACHE flag.
         */
        swapcache_free(entry, NULL);
    } while (err != -ENOMEM);

    if (new_page)
        page_cache_release(new_page);
    return found_page;
}
Example 11: uvm_pmm_sysmem_mappings_add_gpu_mapping
// TODO: Bug 1995015: use a more efficient data structure for
// physically-contiguous allocations.
NV_STATUS uvm_pmm_sysmem_mappings_add_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings,
                                                  NvU64 dma_addr,
                                                  NvU64 virt_addr,
                                                  NvU64 region_size,
                                                  uvm_va_block_t *va_block,
                                                  uvm_processor_id_t owner)
{
    int ret;
    uvm_reverse_map_t *new_reverse_map;
    NvU64 key;
    const NvU64 base_key = dma_addr / PAGE_SIZE;
    const NvU32 num_pages = region_size / PAGE_SIZE;
    uvm_page_index_t page_index;

    UVM_ASSERT(va_block);
    UVM_ASSERT(va_block->va_range);
    UVM_ASSERT(IS_ALIGNED(dma_addr, region_size));
    UVM_ASSERT(IS_ALIGNED(virt_addr, region_size));
    UVM_ASSERT(region_size <= UVM_VA_BLOCK_SIZE);
    UVM_ASSERT(is_power_of_2(region_size));
    UVM_ASSERT(uvm_va_block_contains_address(va_block, virt_addr));
    UVM_ASSERT(uvm_va_block_contains_address(va_block, virt_addr + region_size - 1));
    uvm_assert_mutex_locked(&va_block->lock);

    if (!sysmem_mappings->gpu->access_counters_supported)
        return NV_OK;

    new_reverse_map = kmem_cache_zalloc(g_reverse_page_map_cache, NV_UVM_GFP_FLAGS);
    if (!new_reverse_map)
        return NV_ERR_NO_MEMORY;

    page_index = uvm_va_block_cpu_page_index(va_block, virt_addr);

    new_reverse_map->va_block = va_block;
    new_reverse_map->region = uvm_va_block_region(page_index, page_index + num_pages);
    new_reverse_map->owner = owner;

    for (key = base_key; key < base_key + num_pages; ++key) {
        // Pre-load the tree to allocate memory outside of the table lock. This
        // returns with preemption disabled.
        ret = radix_tree_preload(NV_UVM_GFP_FLAGS);
        if (ret != 0) {
            NvU64 remove_key;

            uvm_spin_lock(&sysmem_mappings->reverse_map_lock);
            for (remove_key = base_key; remove_key < key; ++remove_key)
                (void *)radix_tree_delete(&sysmem_mappings->reverse_map_tree, remove_key);
            uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);

            kmem_cache_free(g_reverse_page_map_cache, new_reverse_map);
            return NV_ERR_NO_MEMORY;
        }

        uvm_spin_lock(&sysmem_mappings->reverse_map_lock);
        ret = radix_tree_insert(&sysmem_mappings->reverse_map_tree, key, new_reverse_map);
        uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);
        UVM_ASSERT(ret == 0);

        // This re-enables preemption
        radix_tree_preload_end();
    }

    return NV_OK;
}
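For contrast, tearing such a mapping down would not need a preload at all, because radix_tree_delete() only releases nodes. Below is a hedged, simplified sketch of what the reverse operation could look like under the same assumptions as above (one tree slot per page of the region, all pointing at a single uvm_reverse_map_t); it is illustrative only and not the driver's actual removal routine:

static void example_remove_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings,
                                       NvU64 dma_addr,
                                       NvU64 region_size)
{
    uvm_reverse_map_t *reverse_map = NULL;
    const NvU64 base_key = dma_addr / PAGE_SIZE;
    const NvU32 num_pages = region_size / PAGE_SIZE;
    NvU64 key;

    uvm_spin_lock(&sysmem_mappings->reverse_map_lock);

    // Deletion never allocates, so no radix_tree_preload()/radix_tree_preload_end()
    // pair is needed around this loop.
    for (key = base_key; key < base_key + num_pages; ++key)
        reverse_map = radix_tree_delete(&sysmem_mappings->reverse_map_tree, key);

    uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);

    // Every slot pointed at the same object, so free it once.
    if (reverse_map)
        kmem_cache_free(g_reverse_page_map_cache, reverse_map);
}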
Example 12: zswap_get_swap_cache_page
/*
 * zswap_get_swap_cache_page
 *
 * This is an adaption of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache). If the page
 * is found, it is returned in retpage. Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * If success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 * the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
                                     struct page **retpage)
{
    struct page *found_page, *new_page = NULL;
    struct address_space *swapper_space = swap_address_space(entry);
    int err;

    *retpage = NULL;
    do {
        /*
         * First check the swap cache. Since this is normally
         * called after lookup_swap_cache() failed, re-calling
         * that would confuse statistics.
         */
        found_page = find_get_page(swapper_space, entry.val);
        if (found_page)
            break;

        /*
         * Get a new page to read into from swap.
         */
        if (!new_page) {
            new_page = alloc_page(GFP_KERNEL);
            if (!new_page)
                break;              /* Out of memory */
        }

        /*
         * call radix_tree_preload() while we can wait.
         */
        err = radix_tree_preload(GFP_KERNEL);
        if (err)
            break;

        /*
         * Swap entry may have been freed since our caller observed it.
         */
        err = swapcache_prepare(entry);
        if (err == -EEXIST) {       /* seems racy */
            radix_tree_preload_end();
            continue;
        }
        if (err) {                  /* swp entry is obsolete ? */
            radix_tree_preload_end();
            break;
        }

        /* May fail (-ENOMEM) if radix-tree node allocation failed. */
        __set_page_locked(new_page);
        SetPageSwapBacked(new_page);
        err = __add_to_swap_cache(new_page, entry);
        if (likely(!err)) {
            radix_tree_preload_end();
            lru_cache_add_anon(new_page);
            *retpage = new_page;
            return ZSWAP_SWAPCACHE_NEW;
        }
        radix_tree_preload_end();
        ClearPageSwapBacked(new_page);
        __clear_page_locked(new_page);
        /*
         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
         * clear SWAP_HAS_CACHE flag.
         */
        swapcache_free(entry, NULL);
    } while (err != -ENOMEM);

    if (new_page)
        page_cache_release(new_page);
    if (!found_page)
        return ZSWAP_SWAPCACHE_FAIL;
    *retpage = found_page;
    return ZSWAP_SWAPCACHE_EXIST;
}
Example 13: xfs_iget_cache_miss
static int
xfs_iget_cache_miss(
    struct xfs_mount    *mp,
    struct xfs_perag    *pag,
    xfs_trans_t         *tp,
    xfs_ino_t           ino,
    struct xfs_inode    **ipp,
    int                 flags,
    int                 lock_flags)
{
    struct xfs_inode    *ip;
    int                 error;
    xfs_agino_t         agino = XFS_INO_TO_AGINO(mp, ino);
    int                 iflags;

    ip = xfs_inode_alloc(mp, ino);
    if (!ip)
        return ENOMEM;

    error = xfs_iread(mp, tp, ip, flags);
    if (error)
        goto out_destroy;

    trace_xfs_iget_miss(ip);

    if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
        error = ENOENT;
        goto out_destroy;
    }

    /*
     * Preload the radix tree so we can insert safely under the
     * write spinlock. Note that we cannot sleep inside the preload
     * region.
     */
    if (radix_tree_preload(GFP_KERNEL)) {
        error = EAGAIN;
        goto out_destroy;
    }

    /*
     * Because the inode hasn't been added to the radix-tree yet it can't
     * be found by another thread, so we can do the non-sleeping lock here.
     */
    if (lock_flags) {
        if (!xfs_ilock_nowait(ip, lock_flags))
            BUG();
    }

    /*
     * These values must be set before inserting the inode into the radix
     * tree as the moment it is inserted a concurrent lookup (allowed by the
     * RCU locking mechanism) can find it and that lookup must see that this
     * is an inode currently under construction (i.e. that XFS_INEW is set).
     * The ip->i_flags_lock that protects the XFS_INEW flag forms the
     * memory barrier that ensures this detection works correctly at lookup
     * time.
     */
    iflags = XFS_INEW;
    if (flags & XFS_IGET_DONTCACHE)
        iflags |= XFS_IDONTCACHE;
    ip->i_udquot = ip->i_gdquot = NULL;
    xfs_iflags_set(ip, iflags);

    /* insert the new inode */
    spin_lock(&pag->pag_ici_lock);
    error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
    if (unlikely(error)) {
        WARN_ON(error != -EEXIST);
        XFS_STATS_INC(xs_ig_dup);
        error = EAGAIN;
        goto out_preload_end;
    }
    spin_unlock(&pag->pag_ici_lock);
    radix_tree_preload_end();

    *ipp = ip;
    return 0;

out_preload_end:
    spin_unlock(&pag->pag_ici_lock);
    radix_tree_preload_end();
    if (lock_flags)
        xfs_iunlock(ip, lock_flags);
out_destroy:
    __destroy_inode(VFS_I(ip));
    xfs_inode_free(ip);
    return error;
}
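Both a failed preload and a lost insertion race are reported here as EAGAIN (a positive errno, following the XFS convention of this era) rather than retried locally. A hedged sketch of how the caller typically reacts, loosely modelled on xfs_iget(); the surrounding radix-tree lookup and perag handling are omitted:

again:
    error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, flags, lock_flags);
    if (error == EAGAIN) {
        /* back off briefly, then redo the whole lookup/insert cycle */
        delay(1);
        goto again;
    }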
Example 14: xfs_iget_cache_miss
static int
xfs_iget_cache_miss(
    struct xfs_mount    *mp,
    struct xfs_perag    *pag,
    xfs_trans_t         *tp,
    xfs_ino_t           ino,
    struct xfs_inode    **ipp,
    int                 flags,
    int                 lock_flags)
{
    struct xfs_inode    *ip;
    int                 error;
    xfs_agino_t         agino = XFS_INO_TO_AGINO(mp, ino);

    ip = xfs_inode_alloc(mp, ino);
    if (!ip)
        return ENOMEM;

    error = xfs_iread(mp, tp, ip, flags);
    if (error)
        goto out_destroy;

    trace_xfs_iget_miss(ip);

    if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
        error = ENOENT;
        goto out_destroy;
    }

    /*
     * Preload the radix tree so we can insert safely under the
     * write spinlock. Note that we cannot sleep inside the preload
     * region.
     */
    if (radix_tree_preload(GFP_KERNEL)) {
        error = EAGAIN;
        goto out_destroy;
    }

    /*
     * Because the inode hasn't been added to the radix-tree yet it can't
     * be found by another thread, so we can do the non-sleeping lock here.
     */
    if (lock_flags) {
        if (!xfs_ilock_nowait(ip, lock_flags))
            BUG();
    }

    spin_lock(&pag->pag_ici_lock);

    /* insert the new inode */
    error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
    if (unlikely(error)) {
        WARN_ON(error != -EEXIST);
        XFS_STATS_INC(xs_ig_dup);
        error = EAGAIN;
        goto out_preload_end;
    }

    /* These values _must_ be set before releasing the radix tree lock! */
    ip->i_udquot = ip->i_gdquot = NULL;
    xfs_iflags_set(ip, XFS_INEW);

    spin_unlock(&pag->pag_ici_lock);
    radix_tree_preload_end();

    *ipp = ip;
    return 0;

out_preload_end:
    spin_unlock(&pag->pag_ici_lock);
    radix_tree_preload_end();
    if (lock_flags)
        xfs_iunlock(ip, lock_flags);
out_destroy:
    __destroy_inode(VFS_I(ip));
    xfs_inode_free(ip);
    return error;
}
Example 15: nilfs_btnode_prepare_change_key
/**
 * nilfs_btnode_prepare_change_key
 *  prepare to move contents of the block for old key to one of new key.
 *  the old buffer will not be removed, but might be reused for new buffer.
 *  it might return -ENOMEM because of memory allocation errors,
 *  and might return -EIO because of disk read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
    struct buffer_head *obh, *nbh;
    struct inode *inode = NILFS_BTNC_I(btnc);
    __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
    int err;

    if (oldkey == newkey)
        return 0;

    obh = ctxt->bh;
    ctxt->newbh = NULL;

    if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
        lock_page(obh->b_page);
        /*
         * We cannot call radix_tree_preload for the kernels older
         * than 2.6.23, because it is not exported for modules.
         */
retry:
        err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (err)
            goto failed_unlock;
        /* BUG_ON(oldkey != obh->b_page->index); */
        if (unlikely(oldkey != obh->b_page->index))
            NILFS_PAGE_BUG(obh->b_page,
                           "invalid oldkey %lld (newkey=%lld)",
                           (unsigned long long)oldkey,
                           (unsigned long long)newkey);

        spin_lock_irq(&btnc->tree_lock);
        err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
        spin_unlock_irq(&btnc->tree_lock);
        /*
         * Note: page->index will not change to newkey until
         * nilfs_btnode_commit_change_key() will be called.
         * To protect the page in intermediate state, the page lock
         * is held.
         */
        radix_tree_preload_end();
        if (!err)
            return 0;
        else if (err != -EEXIST)
            goto failed_unlock;

        err = invalidate_inode_pages2_range(btnc, newkey, newkey);
        if (!err)
            goto retry;

        /* fallback to copy mode */
        unlock_page(obh->b_page);
    }

    nbh = nilfs_btnode_create_block(btnc, newkey);
    if (!nbh)
        return -ENOMEM;
    BUG_ON(nbh == obh);
    ctxt->newbh = nbh;
    return 0;

failed_unlock:
    unlock_page(obh->b_page);
    return err;
}