This article collects typical usage examples of the page_mapped() function from the Linux kernel's C code. If you are unsure what page_mapped() does, how it is used, or what calling it looks like in practice, the hand-picked examples below should help.
Fifteen code examples of page_mapped() are shown, drawn from different kernel subsystems: architecture cache maintenance, page-cache truncation and invalidation, swap, uprobes, and LRU handling.
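Before walking through the examples, it helps to see the pattern they all share: page_mapped(page) returns true while at least one process page table still maps the page, and callers use it to choose between a conservative path (user mappings exist) and a direct path (the page is only reachable through the kernel). The sketch below is illustrative only and not taken from any of the examples; the helper name try_drop_page and the exact set of checks are assumptions, loosely modelled on page-cache invalidation.

/*
 * Illustrative sketch (hypothetical helper): drop a page-cache page only if
 * no process page table still maps it and no I/O is pending on it.
 */
static int try_drop_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;

	lock_page(page);
	if (page_mapped(page) || PageDirty(page) || PageWriteback(page)) {
		/* Still mapped by some process, or I/O in flight: hands off. */
		unlock_page(page);
		return 0;
	}
	/* Unmapped, clean, idle: safe to detach it from the page cache. */
	delete_from_page_cache(page);
	unlock_page(page);
	return 1;
}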
Example 1: __flush_anon_page
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
#ifdef CONFIG_RALINK_SOC
if (!PageHighMem(page)) {
unsigned long addr = (unsigned long) page_address(page);
if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
if (page_mapped(page) && !Page_dcache_dirty(page)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
flush_data_cache_page((unsigned long)kaddr);
kunmap_coherent();
} else {
flush_data_cache_page(addr);
ClearPageDcacheDirty(page);
}
}
} else {
void *laddr = lowmem_page_address(page);
if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
if (page_mapped(page) && !Page_dcache_dirty(page)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
flush_data_cache_page((unsigned long)kaddr);
kunmap_coherent();
} else {
void *kaddr;
kaddr = kmap_atomic(page, KM_PTE1);
flush_data_cache_page((unsigned long)kaddr);
kunmap_atomic(kaddr, KM_PTE1);
ClearPageDcacheDirty(page);
}
}
}
#else
unsigned long addr = (unsigned long) page_address(page);
if (pages_do_alias(addr, vmaddr)) {
if (page_mapped(page) && !Page_dcache_dirty(page)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
flush_data_cache_page((unsigned long)kaddr);
kunmap_coherent();
} else
flush_data_cache_page(addr);
}
#endif
}
Example 2: invalidate_inode_pages2
/**
* invalidate_inode_pages2 - remove all unmapped pages from an address_space
* @mapping: the address_space
*
* invalidate_inode_pages2() is like truncate_inode_pages(), except for the case
* where the page is seen to be mapped into process pagetables. In that case,
* the page is marked clean but is left attached to its address_space.
*
* The page is also marked not uptodate so that a subsequent pagefault will
* perform I/O to bring the page's contents back into sync with its backing
* store.
*
* FIXME: invalidate_inode_pages2() is probably trivially livelockable.
*/
void invalidate_inode_pages2(struct address_space *mapping)
{
struct pagevec pvec;
pgoff_t next = 0;
int i;
pagevec_init(&pvec, 0);
while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
lock_page(page);
if (page->mapping == mapping) { /* truncate race? */
wait_on_page_writeback(page);
next = page->index + 1;
if (page_mapped(page)) {
clear_page_dirty(page);
ClearPageUptodate(page);
} else {
if (!invalidate_complete_page(mapping,
page)) {
clear_page_dirty(page);
ClearPageUptodate(page);
}
}
}
unlock_page(page);
}
pagevec_release(&pvec);
cond_resched();
}
}
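A typical caller of this (older) interface is a filesystem that learns its cached data may be stale, for example after bypassing the page cache with direct I/O or after a server-side change in a network filesystem. The usage sketch below is hedged: the function name and the nrpages shortcut are illustrative, not taken from the source.

/*
 * Hedged usage sketch: force cached pages of an inode to be re-read on the
 * next access after the backing store may have changed underneath us.
 */
static void example_revalidate_mapping(struct inode *inode)
{
	if (inode->i_mapping->nrpages)
		invalidate_inode_pages2(inode->i_mapping);
}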
Example 3: check_tlb_entry
/*
* Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
* and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
*
* Check that valid TLB entries either have the same PA as the PTE, or that the
* PTE is marked non-present. A non-present PTE paired with a page that has a
* non-zero refcount and zero mapcount is normal for a batched TLB flush. A
* zero refcount means the page was freed prematurely. A non-zero mapcount is
* unusual but does not necessarily mean an error, so it is only flagged as
* suspicious.
*/
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
unsigned tlbidx = w | (e << PAGE_SHIFT);
unsigned r0 = dtlb ?
read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
unsigned pte = get_pte_for_vaddr(vpn);
unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
unsigned tlb_asid = r0 & ASID_MASK;
bool kernel = tlb_asid == 1;
int rc = 0;
if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
dtlb ? 'D' : 'I', w, e, vpn,
kernel ? "kernel" : "user");
rc |= TLB_INSANE;
}
if (tlb_asid == mm_asid) {
unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
read_itlb_translation(tlbidx);
if ((pte ^ r1) & PAGE_MASK) {
pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
dtlb ? 'D' : 'I', w, e, r0, r1, pte);
if (pte == 0 || !pte_present(__pte(pte))) {
struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
pr_err("page refcount: %d, mapcount: %d\n",
page_count(p),
page_mapcount(p));
if (!page_count(p))
rc |= TLB_INSANE;
else if (page_mapped(p))
rc |= TLB_SUSPICIOUS;
} else {
Example 4: copy_user_highpage
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *vfrom, *vto;
vto = kmap_atomic(to);
if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
test_bit(PG_dcache_clean, &from->flags)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent(vfrom);
} else {
vfrom = kmap_atomic(from);
copy_page(vto, vfrom);
kunmap_atomic(vfrom);
}
if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
(vma->vm_flags & VM_EXEC))
__flush_purge_region(vto, PAGE_SIZE);
kunmap_atomic(vto);
/* Make sure this page is cleared on other CPUs too before using it */
smp_wmb();
}
Example 5: __update_cache
void __update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
{
struct page *page;
unsigned long pfn, addr;
int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn))) {
wmb();
return;
}
page = pfn_to_page(pfn);
if (page_mapped(page) && Page_dcache_dirty(page)) {
void *kaddr = NULL;
if (PageHighMem(page)) {
addr = (unsigned long)kmap_atomic(page);
kaddr = (void *)addr;
} else
addr = (unsigned long) page_address(page);
if (exec || (cpu_has_dc_aliases &&
pages_do_alias(addr, address & PAGE_MASK))) {
flush_data_cache_page(addr);
ClearPageDcacheDirty(page);
}
if (kaddr)
kunmap_atomic((void *)kaddr);
}
wmb(); /* finish any outstanding arch cache flushes before ret to user */
}
Example 6: free_swap_cache
/**
* free_swap_cache: remove the page from the swap cache if it is no longer mapped
*/
static inline void free_swap_cache(struct page *page)
{
if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
try_to_free_swap(page);
unlock_page(page);
}
}
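In the kernel this helper is normally reached through a thin wrapper that also drops the reference that kept the page alive, in the spirit of free_page_and_swap_cache(). A hedged sketch of that wrapper (the example_ name is ours):

/*
 * Sketch of the usual caller: once the last user is done with the page,
 * drop the swap-cache copy (if it is now unmapped) and release the
 * caller's reference.
 */
static void example_free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	put_page(page);
}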
Example 7: flush_dcache_page
/*
* Handle cache congruency of kernel and userspace mappings of page when kernel
* writes-to/reads-from
*
* The idea is to defer flushing of kernel mapping after a WRITE, possible if:
* -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
* -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
* -In SMP, if hardware caches are coherent
*
* There's a corollary case, where the kernel READs from a userspace-mapped page:
* if the U-mapping is not congruent to the K-mapping, the former needs flushing.
*/
void flush_dcache_page(struct page *page)
{
struct address_space *mapping;
if (!cache_is_vipt_aliasing()) {
clear_bit(PG_dc_clean, &page->flags);
return;
}
/* don't handle anon pages here */
mapping = page_mapping(page);
if (!mapping)
return;
/*
* pagecache page, file not yet mapped to userspace
* Make a note that K-mapping is dirty
*/
if (!mapping_mapped(mapping)) {
clear_bit(PG_dc_clean, &page->flags);
} else if (page_mapped(page)) {
/* kernel reading from page with U-mapping */
void *paddr = page_address(page);
unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_page(paddr, vaddr);
}
}
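The other half of the contract described in the comment above falls on kernel code that writes to a page-cache page through its kernel mapping: after the write it must call flush_dcache_page() so aliasing user mappings cannot see stale data. A hedged sketch of that standard pattern (example_fill_page is a made-up name):

/*
 * Hedged sketch: copy data into a page via the kernel mapping, then tell
 * the architecture code that the kernel mapping of the page is now dirty.
 */
static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}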
Example 8: truncate_inode_page
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
if (page_mapped(page)) {
unmap_mapping_range(mapping,
(loff_t)page->index << PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE, 0);
}
return truncate_complete_page(mapping, page);
}
Example 9: __replace_page
/**
* __replace_page - replace page in vma by new page.
* based on replace_page in mm/ksm.c
*
* @vma: vma that holds the pte pointing to page
* @addr: address the old page is mapped at
* @old_page: the COWed page we are replacing by new_page
* @new_page: the modified page we replace old_page with
*
* Returns 0 on success, -EFAULT on failure.
*/
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page)
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
pte_t *ptep;
int err;
/* For mmu_notifiers */
const unsigned long mmun_start = addr;
const unsigned long mmun_end = addr + PAGE_SIZE;
struct mem_cgroup *memcg;
err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
false);
if (err)
return err;
/* For try_to_free_swap() and munlock_vma_page() below */
lock_page(old_page);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
ptep = page_check_address(old_page, mm, addr, &ptl, 0);
if (!ptep) {
mem_cgroup_cancel_charge(new_page, memcg, false);
goto unlock;
}
get_page(new_page);
page_add_new_anon_rmap(new_page, vma, addr, false);
mem_cgroup_commit_charge(new_page, memcg, false, false);
lru_cache_add_active_or_unevictable(new_page, vma);
if (!PageAnon(old_page)) {
dec_mm_counter(mm, mm_counter_file(old_page));
inc_mm_counter(mm, MM_ANONPAGES);
}
flush_cache_page(vma, addr, pte_pfn(*ptep));
ptep_clear_flush_notify(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, false);
if (!page_mapped(old_page))
try_to_free_swap(old_page);
pte_unmap_unlock(ptep, ptl);
if (vma->vm_flags & VM_LOCKED)
munlock_vma_page(old_page);
put_page(old_page);
err = 0;
unlock:
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(old_page);
return err;
}
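This helper lives in the uprobes code, where the caller first builds a private copy of the page, patches it, and only then swaps it into the page tables. The sketch below is a simplified, hedged rendering of that caller's shape; example_patch_page is a made-up name and the error handling is trimmed.

/* Hedged sketch of the caller: build a modified copy, then swap it in. */
static int example_patch_page(struct vm_area_struct *vma, unsigned long vaddr,
			      struct page *old_page, const void *insn, int len)
{
	struct page *new_page;
	void *kaddr;
	int ret;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		return -ENOMEM;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);

	/* Patch the copy at the target offset within the page. */
	kaddr = kmap_atomic(new_page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), insn, len);
	kunmap_atomic(kaddr);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
	return ret;
}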
Example 10: invalidate_inode_page
/*
* Safely invalidate one page from its pagecache mapping.
* It only drops clean, unused pages. The page must be locked.
*
* Returns 1 if the page is successfully invalidated, otherwise 0.
*/
int invalidate_inode_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
if (!mapping)
return 0;
if (PageDirty(page) || PageWriteback(page))
return 0;
if (page_mapped(page))
return 0;
return invalidate_complete_page(mapping, page);
}
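Callers usually apply this to a whole range of an inode's page cache, in the spirit of invalidate_mapping_pages(): look pages up in batches, try-lock each one, and let invalidate_inode_page() decide (in part via page_mapped()) whether it may be dropped. A hedged sketch, with drop_clean_pages as a made-up name:

/*
 * Hedged sketch of a range scan: try to drop every clean, unmapped,
 * unlocked page-cache page in [start, end]. Returns how many were dropped.
 */
static unsigned long drop_clean_pages(struct address_space *mapping,
				      pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end &&
	       pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* Rely on deletion not changing page->index. */
			index = page->index;
			if (index > end)
				break;
			if (!trylock_page(page))
				continue;
			count += invalidate_inode_page(page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}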
Example 11: tux3_set_page_dirty_bug
static int tux3_set_page_dirty_bug(struct page *page)
{
/* See comment of tux3_set_page_dirty() */
ClearPageReclaim(page);
assert(0);
/* This page should not be mmapped */
assert(!page_mapped(page));
/* This page should be dirty already, otherwise we will lose data. */
assert(PageDirty(page));
return 0;
}
Example 12: truncate_inode_page
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
loff_t holelen;
VM_BUG_ON_PAGE(PageTail(page), page);
holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
if (page_mapped(page)) {
unmap_mapping_range(mapping,
(loff_t)page->index << PAGE_SHIFT,
holelen, 0);
}
return truncate_complete_page(mapping, page);
}
Example 13: copy_from_user_page
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
test_bit(PG_dcache_clean, &page->flags)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent(vfrom);
} else {
memcpy(dst, src, len);
if (boot_cpu_data.dcache.n_aliases)
clear_bit(PG_dcache_clean, &page->flags);
}
}
Example 14: __flush_anon_page
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
unsigned long addr = (unsigned long) page_address(page);
if (pages_do_alias(addr, vmaddr)) {
if (page_mapped(page) && !Page_dcache_dirty(page)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
flush_data_cache_page((unsigned long)kaddr);
kunmap_coherent();
} else
flush_data_cache_page(addr);
}
}
Example 15: lru_deactivate_fn
/*
* If the page cannot be invalidated, it is moved to the
* inactive list to speed up its reclaim. It is moved to the
* head of the list, rather than the tail, to give the flusher
* threads some time to write it out, as this is much more
* effective than the single-page writeout from reclaim.
*
* If the page isn't page_mapped but is dirty or under writeback, it can be
* reclaimed ASAP with the help of PG_reclaim.
*
* 1. active, mapped page -> none
* 2. active, dirty/writeback page -> inactive, head, PG_reclaim
* 3. inactive, mapped page -> none
* 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
* 5. inactive, clean -> inactive, tail
* 6. Others -> none
*
* In case 4 the page is moved to the head of the inactive list because the VM
* expects the flusher threads to write it out, which is much more effective
* than the single-page writeout from reclaim.
*/
static void lru_deactivate_fn(struct page *page, void *arg)
{
int lru, file;
bool active;
struct zone *zone = page_zone(page);
if (!PageLRU(page))
return;
if (PageUnevictable(page))
return;
/* Some processes are using the page */
if (page_mapped(page))
return;
active = PageActive(page);
file = page_is_file_cache(page);
lru = page_lru_base_type(page);
del_page_from_lru_list(zone, page, lru + active);
ClearPageActive(page);
ClearPageReferenced(page);
add_page_to_lru_list(zone, page, lru);
if (PageWriteback(page) || PageDirty(page)) {
/*
* PG_reclaim can race with end_page_writeback(), which
* can confuse readahead. But the race window is _really_
* small and the problem is non-critical.
*/
SetPageReclaim(page);
} else {
struct lruvec *lruvec;
/*
* The page's writeback finished while it sat in the pagevec;
* move the page to the tail of the inactive list.
*/
lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
__count_vm_event(PGROTATED);
}
if (active)
__count_vm_event(PGDEACTIVATE);
update_page_reclaim_stat(zone, page, file, 0);
}
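For completeness, this callback is not invoked directly; kernels of this era feed it pages through a per-CPU pagevec from deactivate_page(), which is what invalidate_mapping_pages() and friends call when a page cannot be dropped outright. The sketch below is a hedged reconstruction of that wiring; lru_deactivate_pvecs is assumed to be the per-CPU pagevec defined alongside this function.

/*
 * Hedged sketch: queue a page for deactivation and drain the per-CPU
 * pagevec through lru_deactivate_fn() once it fills up.
 */
void deactivate_page(struct page *page)
{
	/* Deactivating an unevictable page would be pointless. */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}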