

C++ page_count Function Code Examples

This article collects typical usage examples of the page_count function in C++. If you are wondering exactly what page_count does, how it is called, or what real-world uses of it look like, the hand-picked code examples below may help.


The following presents 15 code examples of the page_count function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
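
Before diving into the examples, here is a minimal sketch of what page_count() actually reports: the current reference count of a struct page. This sketch is not taken from any of the projects below; it assumes a Linux kernel-module context, and the helper name demo_page_count_usage is made up purely for illustration.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static int demo_page_count_usage(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	/* A freshly allocated page starts with a single reference. */
	pr_info("after alloc_page: page_count = %d\n", page_count(page));

	get_page(page);		/* take an extra reference */
	pr_info("after get_page:   page_count = %d\n", page_count(page));

	put_page(page);		/* drop the extra reference */
	put_page(page);		/* drop the last reference; the page is freed */
	return 0;
}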

Example 1: generic_pipe_buf_steal

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
Author: andy-padavan, Project: rt-n56u, Lines: 29, Source: pipe.c
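
As a hedged illustration of how a caller might consume the return value above (try_take_pipe_page is a hypothetical helper name, not part of the pipe API), a sketch could look like this:

#include <linux/pipe_fs_i.h>

static struct page *try_take_pipe_page(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	/* 0 means the steal succeeded: buf->page is now locked and owned by us */
	if (generic_pipe_buf_steal(pipe, buf) == 0)
		return buf->page;

	/* someone else still holds a reference, so fall back to copying the data */
	return NULL;
}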

Example 2: __gnttab_unmap_refs_async

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}
Author: DenisLug, Project: mptcp, Lines: 18, Source: grant-table.c

Example 3: vnic_destroy_allocator

static void vnic_destroy_allocator(struct vnic_rx_ring *ring)
{
	struct vnic_rx_alloc *page_alloc;
	int i;

	if (vnic_rx_linear)
		return;

	for (i = 0; i < ring->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		if (page_alloc->page) {
			/* only read page_count() once the page is known to be non-NULL */
			vnic_dbg_data(ring->port->name, "Freeing allocator:%d count:%d\n",
				      i, page_count(page_alloc->page));
			put_page(page_alloc->page);
			page_alloc->page = NULL;
		}
	}
}
Author: u9621071, Project: kernel-uek-UEK3, Lines: 18, Source: vnic_data_rx.c

Example 4: do_no_page

/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held.
 */
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
	unsigned long address, int write_access, pte_t *page_table)
{
	struct page * new_page;
	pte_t entry;

	if (!vma->vm_ops || !vma->vm_ops->nopage)
		return do_anonymous_page(mm, vma, page_table, write_access, address);

	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection.
	 */
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
	if (new_page == NULL)	/* no page was available -- SIGBUS */
		return 0;
	if (new_page == NOPAGE_OOM)
		return -1;
	++mm->rss;
	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(new_page);
	flush_icache_page(vma, new_page);
	entry = mk_pte(new_page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (page_count(new_page) > 1 &&
		   !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	set_pte(page_table, entry);
	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	return 2;	/* Major fault */
}
Author: davidbau, Project: davej, Lines: 54, Source: memory.c

Example 5: split_page

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
Author: Razziell, Project: R-Kernel, Lines: 27, Source: backport-3.10.c
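
A hedged usage sketch of the contract described above (demo_split_page is a made-up name): allocate an order-2 block, split it into four independent pages, then free each sub-page individually, as the comment requires.

#include <linux/mm.h>
#include <linux/gfp.h>

static int demo_split_page(void)
{
	const unsigned int order = 2;
	struct page *page = alloc_pages(GFP_KERNEL, order);
	int i;

	if (!page)
		return -ENOMEM;

	split_page(page, order);	/* now (1 << order) independent single pages */

	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);	/* each sub-page must be freed on its own */

	return 0;
}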

Example 6: homecache_change_page_home

void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}
Author: ANFS, Project: ANFS-kernel, Lines: 20, Source: homecache.c

Example 7: simple_vma_fault

int simple_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{	
	size_t size = vma->vm_end - vma->vm_start;
	int i;
	int ret;
	struct page *map_page;
	char *ptr;

	printk(KERN_NOTICE "vmf:fault=%p flag=%x offset=%lx\n",
	       vmf->virtual_address,
	       vmf->flags,
	       vmf->pgoff);
	printk(KERN_NOTICE "vma:start=0x%lx size=%x pgoff=%lx\n",
	       vma->vm_start,
	       (int)size,
	       vma->vm_pgoff);
	       
	vma->vm_flags |= VM_MIXEDMAP;
	map_page = data_pages[vmf->pgoff];

	ret = vm_insert_mixed(vma,
			      (unsigned long)vmf->virtual_address,
			      page_to_pfn(map_page));
	printk(KERN_NOTICE "ret=%d pfn=%x vaddr=0x%lx cnt=%d\n",
	       ret,
	       (int)page_to_pfn(map_page),
	       (unsigned long)vmf->virtual_address,
	       page_count(map_page));

	/*
	 * Example of the "Direct I/O"
	 */
	ptr = kmap_atomic(map_page);
	for (i = 0; i < 100; i++) {
		ptr[i] = (unsigned char)vmf->pgoff;
	}
	kunmap_atomic(ptr);
	SetPageDirty(map_page);

	return VM_FAULT_NOPAGE;
}
Author: gurugio, Project: ldd, Lines: 41, Source: drv_vma_fault.c

Example 8: allocate

    void * allocate( std::size_t size) const
    {
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

        const std::size_t pages( page_count( size) + 1); // add one guard page
        const std::size_t size_ = pages * pagesize();
        BOOST_ASSERT( 0 < size && 0 < size_);

        void * limit = ::VirtualAlloc( 0, size_, MEM_COMMIT, PAGE_READWRITE);
        if ( ! limit) throw std::bad_alloc();

        std::memset( limit, '\0', size_); // zero the committed region: memset(ptr, value, count)

        DWORD old_options;
        const BOOL result = ::VirtualProtect(
            limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options);
        BOOST_ASSERT( FALSE != result);

        return static_cast< char * >( limit) + size_;
    }
Author: 0xbda2d2f8, Project: zpublic, Lines: 21, Source: stack_allocator_windows.hpp

Example 9: iscsi_tcp_segment_map

/**
 * iscsi_tcp_segment_map - map the current S/G page
 * @segment: iscsi_segment
 * @recv: 1 if called from recv path
 *
 * We only need to possibly kmap data if scatter lists are being used,
 * because the iscsi passthrough and internal IO paths will never use high
 * mem pages.
 */
static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
{
	struct scatterlist *sg;

	if (segment->data != NULL || !segment->sg)
		return;

	sg = segment->sg;
	BUG_ON(segment->sg_mapped);
	BUG_ON(sg->length == 0);

	/*
	 * If the page count is greater than one it is ok to send
	 * to the network layer's zero copy send path. If not we
	 * have to go the slow sendmsg path. We always map for the
	 * recv path.
	 */
	if (page_count(sg_page(sg)) >= 1 && !recv)
		return;

	if (recv) {
		segment->atomic_mapped = true;
		segment->sg_mapped = kmap_atomic(sg_page(sg));
	} else {
		segment->atomic_mapped = false;
		/* the xmit path can sleep with the page mapped so use kmap */
		segment->sg_mapped = kmap(sg_page(sg));
	}

	segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
}
Author: Core2idiot, Project: Kernel-Samsung-3.0...-, Lines: 30, Source: libiscsi_tcp.c

Example 10: drm_ttm_free_alloced_pages

static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
	int i;
	struct drm_buffer_manager *bm = &ttm->dev->bm;
	struct page **cur_page;

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages + i;
		if (*cur_page) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
			ClearPageReserved(*cur_page);
#endif
			if (page_count(*cur_page) != 1)
				DRM_ERROR("Erroneous page count. Leaking pages.\n");
			if (page_mapped(*cur_page))
				DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
			__free_page(*cur_page);
			--bm->cur_pages;
		}
	}
}
Author: jobi, Project: drm-psb, Lines: 21, Source: drm_ttm.c

Example 11: __test_page_isolated_in_pageblock

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If race between isolatation and allocation happens,
			 * some free pages could be in MIGRATE_MOVABLE list
			 * although pageblock's migratation type of the page
			 * is MIGRATE_ISOLATE. Catch it and move the page into
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
						MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		}
		else if (page_count(page) == 0 &&
			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
Author: GREYFOXRGR, Project: BPI-M3-bsp, Lines: 45, Source: page_isolation.c

Example 12: __test_page_isolated_in_pageblock

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
				page_private(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
Author: 12rafael, Project: jellytimekernel, Lines: 30, Source: page_isolation.c

Example 13: hello_init

static int __init hello_init(void)
{
    int i, total, countUsedPage;
    struct page* curPage;

    char path[] = "./hash.bin";
    printk(KERN_INFO "Starting module. You have %lu pages to play with!\n", get_num_physpages());
    logFile = file_open(path, O_CREAT | O_WRONLY, S_IRWXU);
    curPage = pfn_to_page(node_data[0]->node_start_pfn);
    total = get_num_physpages();
    countUsedPage = 0;
    for(i=0; i<total; i++) {
        curPage = pfn_to_page(node_data[0]->node_start_pfn + i);
        if(page_count(curPage) > 0) {
            write_hash_to_file(countUsedPage, kmap(curPage));
            kunmap(curPage);    /* balance the kmap() above so highmem mappings are not leaked */
            countUsedPage++;
        }
    }
    file_sync(logFile);
    file_close(logFile);
    printk(KERN_INFO "Save the world! countUsedPage:%d\n", countUsedPage);
    return 0;
}
Author: Deependra-Patel, Project: memoryBuddies, Lines: 23, Source: hash.c

Example 14: mlx4_en_destroy_allocator

static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}
Author: Cai900205, Project: test, Lines: 23, Source: en_rx.c

Example 15: release_pages

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		if (!put_page_testzero(page))
			continue;

		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		if (TestClearPageLRU(page))
			del_page_from_lru(zone, page);
		if (page_count(page) == 0) {
			if (!pagevec_add(&pages_to_free, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_free(&pages_to_free);
				pagevec_reinit(&pages_to_free);
				zone = NULL;	/* No lock is held */
			}
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}
Author: BackupTheBerlios, Project: tew632-brp-svn, Lines: 49, Source: swap.c
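
A hedged caller-side sketch of the batched release described above (drop_page_batch is an illustrative name, and the matching kernel headers are assumed to be included): instead of dropping each reference one by one, hand the whole array to release_pages() in a single call.

static void drop_page_batch(struct page **pages, int nr)
{
	/* third argument is the "cold" hint; 0 marks the pages as cache-hot */
	release_pages(pages, nr, 0);
}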


Note: The page_count function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.