This article collects typical usage examples of the page_cache_get function in C, drawn from real Linux kernel and kernel-module code. If you are wondering what page_cache_get does or how to use it, the 15 code examples below should help; they are ordered roughly by popularity.
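A note on the API itself before the examples: in the kernel versions these snippets come from, page_cache_get() and its counterpart page_cache_release() are thin aliases for the generic page reference helpers, defined in include/linux/pagemap.h (the macros were removed in 4.6 in favor of calling get_page()/put_page() directly):

/* include/linux/pagemap.h (pre-4.6 kernels) */
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

Every example below therefore boils down to taking an extra reference on a struct page so it cannot be freed while the function is still working with it.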
Example 1: romfs_readpage
static int
romfs_readpage(struct file *file, struct page * page)
{
struct inode *inode = page->mapping->host;
loff_t offset, avail, readlen;
void *buf;
int result = -EIO;
page_cache_get(page);
lock_kernel();
buf = kmap(page);
if (!buf)
goto err_out;
/* 32 bit warning -- but not for us :) */
offset = page_offset(page);
if (offset < i_size_read(inode)) {
avail = inode->i_size-offset;
readlen = min_t(unsigned long, avail, PAGE_SIZE);
if (romfs_copyfrom(inode, buf, ROMFS_I(inode)->i_dataoffset+offset, readlen) == readlen) {
if (readlen < PAGE_SIZE) {
memset(buf + readlen,0,PAGE_SIZE-readlen);
}
SetPageUptodate(page);
result = 0;
}
}

if (result)
	ClearPageUptodate(page);
flush_dcache_page(page);
kunmap(page);
err_out:
unlock_kernel();
unlock_page(page);
page_cache_release(page);
return result;
}
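The get/release pair brackets work done while the page lock may be dropped; note that ->readpage is entered with the page locked and must unlock it on every exit path. A minimal sketch of that contract, using a hypothetical fill_page_from_backing_store() helper (not part of the original source):

static int example_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	void *buf = kmap(page);
	int err;

	/* fill_page_from_backing_store() is a hypothetical helper that
	 * reads PAGE_SIZE bytes of inode data at the page's offset. */
	err = fill_page_from_backing_store(inode, buf, page_offset(page));
	if (!err)
		SetPageUptodate(page);

	kunmap(page);
	unlock_page(page);	/* required on every exit path */
	return err;
}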
Example 2: __add_to_swap_cache
/*
* __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
int error;
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageSwapCache(page));
VM_BUG_ON(!PageSwapBacked(page));
page_cache_get(page);
SetPageSwapCache(page);
set_page_private(page, entry.val);
spin_lock_irq(&swapper_space.tree_lock);
error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
if (likely(!error)) {
total_swapcache_pages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
INC_CACHE_INFO(add_total);
}
spin_unlock_irq(&swapper_space.tree_lock);
if (unlikely(error)) {
/*
* Only the context which has set the SWAP_HAS_CACHE flag
* would call add_to_swap_cache(),
* so add_to_swap_cache() cannot return -EEXIST.
*/
VM_BUG_ON(error == -EEXIST);
set_page_private(page, 0UL);
ClearPageSwapCache(page);
page_cache_release(page);
}
return error;
}
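For contrast, the removal path undoes each step in reverse. A sketch modeled on __delete_from_swap_cache() from the same kernel era (the outer delete_from_swap_cache() additionally calls swap_free() and drops the cache's reference with page_cache_release()):

void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}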
Example 3: rotate_reclaimable_page
/*
* Writeback is about to end against a page which has been marked for immediate
* reclaim. If it still appears to be reclaimable, move it to the tail of the
* inactive list.
*
* Returns zero if it cleared PG_writeback.
*/
int rotate_reclaimable_page(struct page *page)
{
struct pagevec *pvec;
unsigned long flags;
if (PageLocked(page))
return 1;
if (PageDirty(page))
return 1;
if (PageActive(page))
return 1;
if (!PageLRU(page))
return 1;
page_cache_get(page);
local_irq_save(flags);
pvec = &__get_cpu_var(lru_rotate_pvecs);
if (!pagevec_add(pvec, page))
pagevec_move_tail(pvec);
local_irq_restore(flags);
if (!test_clear_page_writeback(page))
BUG();
return 0;
}
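The return value drives the one caller. A hedged sketch modeled on end_page_writeback() from kernels of this era: if the page was not marked PG_reclaim, or rotation was refused (nonzero return), the caller clears PG_writeback itself; a zero return means rotate_reclaimable_page() already did so.

void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}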
Example 4: smb_writepage
/*
* Write a page to the server. This will be used for NFS swapping only
* (for now), and we currently do this synchronously only.
*
* We are called with the page locked and we unlock it when done.
*/
static int
smb_writepage(struct page *page, struct writeback_control *wbc)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
unsigned long end_index;
unsigned offset = PAGE_CACHE_SIZE;
int err;
BUG_ON(!mapping);
inode = mapping->host;
BUG_ON(!inode);
end_index = inode->i_size >> PAGE_CACHE_SHIFT;
/* easy case */
if (page->index < end_index)
goto do_it;
/* things got complicated... */
offset = inode->i_size & (PAGE_CACHE_SIZE-1);
/* OK, are we completely out? */
if (page->index >= end_index+1 || !offset)
return 0; /* truncated - don't care */
do_it:
page_cache_get(page);
err = smb_writepage_sync(inode, page, 0, offset);
SetPageUptodate(page);
unlock_page(page);
page_cache_release(page);
return err;
}
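The end-of-file arithmetic above deserves a worked example (hypothetical numbers, assuming PAGE_CACHE_SIZE == 4096, PAGE_CACHE_SHIFT == 12):

/* Suppose i_size = 10000 bytes:
 *   end_index = 10000 >> PAGE_CACHE_SHIFT   = 2     (pages 0 and 1 are full)
 *   offset    = 10000 & (PAGE_CACHE_SIZE-1) = 1808  (valid bytes in page 2)
 * Pages 0 and 1 take the "easy case" and write a full page; page 2
 * writes only 1808 bytes; any page with index >= 3 (or index 2 itself,
 * had i_size been an exact multiple of the page size) is past EOF. */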
Example 5: __add_to_swap_cache (older variant)
/*
* __add_to_swap_cache resembles add_to_page_cache on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
gfp_t gfp_mask)
{
int error;
BUG_ON(PageSwapCache(page));
BUG_ON(PagePrivate(page));
error = radix_tree_preload(gfp_mask);
if (!error) {
write_lock_irq(&swapper_space.tree_lock);
error = radix_tree_insert(&swapper_space.page_tree,
entry.val, page);
if (!error) {
page_cache_get(page);
SetPageLocked(page);
SetPageSwapCache(page);
set_page_private(page, entry.val);
total_swapcache_pages++;
pagecache_acct(1);
}
write_unlock_irq(&swapper_space.tree_lock);
radix_tree_preload_end();
}
return error;
}
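In that era __add_to_swap_cache() was wrapped by an add_to_swap_cache() that handled the swap-entry reference count and the error statistics. A sketch modeled on the 2.6.18-era wrapper:

static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/* Anon pages are already on the LRU; no lru_cache_add() here. */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}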
Example 6: dedup_get_block_page
/**
* Get the page associated with the given block inside our dedup structure
*/
struct page* dedup_get_block_page(sector_t block)
{
struct page *res = NULL;
// Check if the block is inside our range
if (dedup_is_in_range(block)) {
// Get the page pointer stored inside the dedup structure
res = blocksArray.pages[block - start_block];
if (res != NULL) {
// If it's not NULL, check that the page is up to date and on the LRU
if (PageLRU(res) && PageUptodate(res)) {
page_cache_get(res);
}
else {
// Cannot use page, need to read from bdev
blocksArray.pages[block - start_block] = NULL;
res = NULL;
}
}
}
else
printk(KERN_WARNING "dedup_get_block_page: block not in range.\n");
return res;
}
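The reference taken with page_cache_get() above must eventually be dropped by the caller. A hypothetical counterpart for this module (dedup_put_block_page() is not in the original source, only an illustration of the expected pairing):

/* Hypothetical helper: release a page returned by dedup_get_block_page(). */
static void dedup_put_block_page(struct page *page)
{
	if (page)
		page_cache_release(page);	/* undo page_cache_get() */
}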
Example 7: add_to_swap_cache
/*
* add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
int error;
BUG_ON(!PageLocked(page));
BUG_ON(PageSwapCache(page));
BUG_ON(PagePrivate(page));
BUG_ON(!PageSwapBacked(page));
error = radix_tree_preload(gfp_mask);
if (!error) {
page_cache_get(page);
SetPageSwapCache(page);
set_page_private(page, entry.val);
spin_lock_irq(&swapper_space.tree_lock);
error = radix_tree_insert(&swapper_space.page_tree,
entry.val, page);
if (likely(!error)) {
total_swapcache_pages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
INC_CACHE_INFO(add_total);
}
spin_unlock_irq(&swapper_space.tree_lock);
radix_tree_preload_end();
if (unlikely(error)) {
set_page_private(page, 0UL);
ClearPageSwapCache(page);
page_cache_release(page);
}
}
return error;
}
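A hedged sketch of a caller, in the spirit of add_to_swap() from the reclaim path (error handling simplified; get_swap_page() reserves a swap slot that must be given back on failure):

swp_entry_t entry = get_swap_page();

if (entry.val) {
	if (add_to_swap_cache(page, entry, GFP_ATOMIC) == 0)
		SetPageDirty(page);	/* reclaim will write it to swap */
	else
		swap_free(entry);	/* lost a race or out of memory */
}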
Example 8: lru_cache_add
/**
* lru_cache_add: add a page to the page lists
* @page: the page to add
*/
void lru_cache_add(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
page_cache_get(page);
if (!pagevec_add(pvec, page))
__pagevec_lru_add(pvec);
put_cpu_var(lru_add_pvecs);
}
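Pages parked in the per-CPU pagevec are not yet visible on any zone LRU list; a drain flushes them. A sketch modeled on lru_add_drain() of the same era (real versions also drain the active-list pagevec):

void lru_add_drain(void)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}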
Example 9: get_user_pages
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
int i = 0;
do {
struct vm_area_struct * vma;
vma = find_extend_vma(mm, start);
if ( !vma ||
(!force &&
((write && (!(vma->vm_flags & VM_WRITE))) ||
(!write && (!(vma->vm_flags & VM_READ))) ) )) {
if (i) return i;
return -EFAULT;
}
spin_lock(&mm->page_table_lock);
do {
struct page *map;
while (!(map = follow_page(mm, start, write))) {
spin_unlock(&mm->page_table_lock);
switch (handle_mm_fault(mm, vma, start, write)) {
case 1:
tsk->min_flt++;
break;
case 2:
tsk->maj_flt++;
break;
case 0:
if (i) return i;
return -EFAULT;
default:
if (i) return i;
return -ENOMEM;
}
spin_lock(&mm->page_table_lock);
}
if (pages) {
pages[i] = get_page_map(map);
/* FIXME: call the correct function,
* depending on the type of the found page
*/
if (pages[i])
page_cache_get(pages[i]);
}
if (vmas)
vmas[i] = vma;
i++;
start += PAGE_SIZE;
len--;
} while(len && start < vma->vm_end);
spin_unlock(&mm->page_table_lock);
} while(len);
return i;
}
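Since page_cache_get() is called on each page handed back, the caller owns one reference per page. A hypothetical usage sketch for this eight-argument API (user_addr is an assumed variable):

struct page *page;
int ret;

down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm, user_addr & PAGE_MASK,
		     1 /* len, in pages */, 1 /* write */, 0 /* force */,
		     &page, NULL);
up_read(&current->mm->mmap_sem);

if (ret == 1) {
	/* ... do I/O on the pinned page ... */
	page_cache_release(page);	/* drop the reference GUP took */
}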
Example 10: lru_cache_add_active
void fastcall lru_cache_add_active(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
page_cache_get(page);
if (!pagevec_add(pvec, page))
__pagevec_lru_add_active(pvec);
put_cpu_var(lru_add_active_pvecs);
}
Example 11: __lru_cache_add
void __lru_cache_add(struct page *page, enum lru_list lru)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
page_cache_get(page);
if (!pagevec_add(pvec, page))
____pagevec_lru_add(pvec, lru);
put_cpu_var(lru_add_pvecs);
}
Example 12: __lru_cache_add (PREEMPT_RT variant)
/*
* Order of operations is important: flush the pagevec when it's already
* full, not when adding the last page, to make sure that last page is
* not added to the LRU directly when passed to this function. Because
* mark_page_accessed() (called after this when writing) only activates
* pages that are on the LRU, linear writes in subpage chunks would see
* every PAGEVEC_SIZE page activated, which is unexpected.
*/
void __lru_cache_add(struct page *page, enum lru_list lru)
{
struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru];
page_cache_get(page);
if (!pagevec_space(pvec))
__pagevec_lru_add(pvec, lru);
pagevec_add(pvec, page);
put_locked_var(swapvec_lock, lru_add_pvecs);
}
Example 13: __lru_cache_add
/**
* __lru_cache_add: add the page to the per-CPU lru_add_pvecs pagevec for the given LRU list
*/
void __lru_cache_add(struct page *page, enum lru_list lru)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
page_cache_get(page);
/* Queue in the per-CPU pagevec; only when the pagevec fills up
 * are its pages moved onto the LRU list of each page's zone. */
if (!pagevec_add(pvec, page))
____pagevec_lru_add(pvec, lru);
put_cpu_var(lru_add_pvecs);
}
Example 14: lru_cache_add (fastcall variant)
/**
* lru_cache_add: add a page to the page lists
* @page: the page to add
*/
void fastcall lru_cache_add(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
page_cache_get(page);
// if no space is left in the pvec, move all of its pages onto the zone's inactive list
if (!pagevec_add(pvec, page)) {
__pagevec_lru_add(pvec);
}
put_cpu_var(lru_add_pvecs);
}
Example 15: smb_readpage
/*
* We are called with the page locked and we unlock it when done.
*/
static int
smb_readpage(struct file *file, struct page *page)
{
int error;
struct dentry *dentry = file->f_path.dentry;
page_cache_get(page);
error = smb_readpage_sync(dentry, page);
page_cache_release(page);
return error;
}
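Across all 15 examples the pattern is the same, and it is the whole point of page_cache_get(): take a reference before doing anything that could sleep, drop the page lock, or hand the page to another context, and release it when done. In miniature (do_something() is a placeholder):

page_cache_get(page);		/* refcount++: the page cannot be freed under us */
do_something(page);		/* work that may sleep or race with reclaim */
page_cache_release(page);	/* refcount--: freed when the last reference goes */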