C++ pte_none Function Code Examples

This article collects typical usage examples of the C++ function pte_none from open-source projects. If you have been wondering what pte_none does, how to call it, and what real-world uses look like, the hand-picked examples below should help.


Fifteen code examples of the pte_none function are shown below, sorted by popularity by default.
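
Most of the examples share one idiom: walk down the page-table hierarchy, then test pte_none() on the leaf entry, either to confirm that a slot is empty before installing a mapping or to skip entries that map nothing. (The kmap_atomic variants check a fixed-map slot instead of walking, but the pte_none() test plays the same role.) The sketch below condenses that shared pattern. It is a minimal illustration rather than code from any of the cited projects: walk_to_pte() is a hypothetical helper name, and the walk assumes the four-level pgd/pud/pmd/pte API used by the kernel versions shown here (no p4d level).

#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Hypothetical helper: descend to the PTE covering 'addr' in 'mm' and
 * return it only if it maps something. pte_none() is true when the
 * entry is completely empty: no physical page and no swap entry.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	/* pte_offset_kernel() suits kernel mappings; user mappings would
	 * use pte_offset_map()/pte_unmap() as in Examples 13 and 15. */
	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return NULL;	/* empty slot: nothing mapped, nothing swapped */

	return pte;
}

Note that pte_none() and pte_present() are not opposites: a swapped-out page has an entry that is neither none nor present. That is why Example 6 checks !pte_none(*ptep) && pte_present(*ptep) before freeing a page.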

Example 1: pagefault_disable

void *kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	//POS (Cheolhee Lee)
	if (!(PageHighMem(page) || PageNVRAM(page)))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so the contained TLB flush ensures the TLB is updated
	 * with the new mapping.
	 */
	set_top_pte(vaddr, mk_pte(page, kmap_prot));

	return (void *)vaddr;
}
Developer ID: chl4651, project: HEAPO, lines: 45, source file: highmem.c

Example 2: BUG_ON

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	/* We do not yet support multiple huge page sizes. */
	BUG_ON(sz != PMD_SIZE);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
Developer ID: 1703011, project: asuswrt-merlin, lines: 18, source file: hugetlbpage.c

Example 3: unmap_ptes

static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
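		/* For each mapped entry: zero the PTE, drop the reference the
		 * mapping held on this page-table page, and invalidate the
		 * stage-2 TLB entry for the guest physical address. */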
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}
Developer ID: AppliedMicro, project: ENGLinuxLatest, lines: 18, source file: mmu.c

Example 4: BUG_ON

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	/* We do not yet support multiple huge page sizes. */
	BUG_ON(sz != PMD_SIZE);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
Developer ID: DirtyDroidX, project: android_kernel_htc_m8ul, lines: 18, source file: hugetlbpage.c

Example 5: pagefault_disable

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
Developer ID: 123456798wil, project: kernel_dell_streak7, lines: 44, source file: highmem.c

Example 6: consistent_free

/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		__free_reserved_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
				(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
Developer ID: 1800alex, project: linux, lines: 47, source file: consistent.c

Example 7: pagefault_disable

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
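	/* The slot must be vacant before the new mapping is installed. */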
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));

	return (void *)vaddr;
}
Developer ID: kozmikkick, project: eternityprj-kernel-endeavoru-128, lines: 27, source file: highmem_32.c

Example 8: shmedia_mapioaddr

static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;
	unsigned long flags = 1; /* 1 = CB0-1 device */

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	/* Complain if a different mapping is already installed at this address. */
	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, PAGE_SIZE);
}
Developer ID: mobilipia, project: iods, lines: 43, source file: ioremap_64.c

Example 9: pagefault_disable

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}
Developer ID: 03199618, project: linux, lines: 23, source file: highmem.c

Example 10: callback_page_walk

static inline
int callback_page_walk(pte_t *pte, unsigned long addr, unsigned long next_addr, struct mm_walk *walk)
{

	/* Skip empty or non-present entries; there is no fault to force. */
	if (pte_none(*pte) || !pte_present(*pte)
		/* || !pte_young(*pte)
		 || pte_special(*pte) */
		)
		return 0;

	proc[(long)walk->private].next_addr = addr;

	/* TODO: try pte_mknuma on 3.8 */
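	/* Clearing _PAGE_PRESENT makes the next access to this page fault,
	 * so the module can observe which task touches it. */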
	*pte = pte_clear_flags(*pte, _PAGE_PRESENT);

	spcd_pf_extra++;

	return 1;
}
Developer ID: matthiasdiener, project: cdsm, lines: 19, source file: pagefault.c

Example 11: pagefault_disable

void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Fold the page's cache colour (DCACHE_ALIAS) into the slot index so
	 * the temporary mapping lands on a matching colour. */
	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));

	return (void *)vaddr;
}
Developer ID: 0x000000FF, project: edison-linux, lines: 19, source file: highmem.c

Example 12: remap_area_pte

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
                                  unsigned long phys_addr, unsigned long flags)
{
    unsigned long end;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    do {
        if (!pte_none(*pte))
            printk("remap_area_pte: page already exists\n");
        set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT |
                                 _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
        address += PAGE_SIZE;
        phys_addr += PAGE_SIZE;
        pte++;
    } while (address < end);
}
Developer ID: empeg, project: empeg-hijack, lines: 19, source file: ioremap.c

Example 13: get_gate_page

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
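	/* The gate page must actually be mapped; an empty slot means -EFAULT. */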
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
Developer ID: LarryShang, project: linux, lines: 43, source file: gup.c

Example 14: do_anonymous_page

/*
 * We are called with the MM semaphore and page_table_lock
 * spinlock held to protect against concurrent faults in
 * multithreaded programs. 
 */
static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	pte_t entry;

	/* Read-only mapping of ZERO_PAGE. */
	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));

	/* ..except if it's a write access */
	if (write_access) {
		struct page *page;

		/* Allocate our own private page. */
		spin_unlock(&mm->page_table_lock);

		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto no_mem;
		clear_user_highpage(page, addr);

		spin_lock(&mm->page_table_lock);
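		/* Re-check under the lock: another thread may have populated
		 * this PTE while we slept in alloc_page(). */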
		if (!pte_none(*page_table)) {
			page_cache_release(page);
			spin_unlock(&mm->page_table_lock);
			return 1;
		}
		mm->rss++;
		flush_page_to_ram(page);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		lru_cache_add(page);
		mark_page_accessed(page);
	}

	set_pte(page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	spin_unlock(&mm->page_table_lock);
	return 1;	/* Minor fault */

no_mem:
	return -1;
}
Developer ID: iwangv, project: edimax-br-6528n, lines: 47, source file: memory.c

Example 15: forall_pte_pmd

/*
 * Again because of the changes in page table walking, a 2.4 and 2.5
 * version is supplied
 */
inline unsigned long forall_pte_pmd(struct mm_struct *mm, pmd_t *pmd, 
		unsigned long start, unsigned long end, 
		unsigned long *sched_count,
		void *data,
		unsigned long (*func)(pte_t *, unsigned long, void *))
{
	
	pte_t *ptep, pte;
	unsigned long pmd_end;
	unsigned long ret=0;

	if (pmd_none(*pmd)) return 0;

	pmd_end = (start + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end) end = pmd_end;

	do {
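		/* Snapshot the PTE with preemption disabled, then drop the
		 * temporary mapping before calling func() on the copy. */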
		preempt_disable();
		ptep = pte_offset_map(pmd, start);
		pte = *ptep;
		pte_unmap(ptep);
		preempt_enable();

		/* Call func() if a PTE is available */
		if (!pte_none(pte)) {

			/*
			 * Call schedule if necessary
			 *	Can func() block or be preempted?
			 *	It seems the sched_count won't be guaranteed
			 *	accurate.
			 */
			spin_unlock(&mm->page_table_lock);
			check_resched(sched_count);
			ret += func(&pte, start, data);
			spin_lock(&mm->page_table_lock);
		}
		start += PAGE_SIZE;
	} while (start && (start < end));

	return ret;
}
开发者ID:baozich,项目名称:scripts,代码行数:46,代码来源:pagetable.c


Note: The pte_none examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, and the source code remains the copyright of those authors; consult each project's License before distributing or using it. Do not reproduce this article without permission.