This article collects typical usage examples of the pfn_to_page function from C code in the Linux kernel. If you have been wondering what pfn_to_page actually does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
The sections below show 15 code examples of the pfn_to_page function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C code examples.
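Before the individual examples, here is a minimal sketch of the pattern that almost all of them share: a page frame number (pfn) is first checked with pfn_valid() and only then converted to its struct page descriptor with pfn_to_page(). The helper name pfn_to_page_checked below is hypothetical and exists only to illustrate that pattern; pfn_valid(), pfn_to_page() and the <linux/mm.h> header are the real kernel interfaces the examples use.
#include <linux/mm.h>    /* pfn_valid(), pfn_to_page(), struct page */

/*
 * Hypothetical helper illustrating the common pattern: validate the
 * page frame number before converting it, because calling
 * pfn_to_page() on an invalid pfn yields a struct page pointer that
 * must not be dereferenced.
 */
static struct page *pfn_to_page_checked(unsigned long pfn)
{
    if (!pfn_valid(pfn))
        return NULL;    /* no struct page backs this pfn */
    return pfn_to_page(pfn);
}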
Example 1: balloon_init
static int __init balloon_init(void)
{
    unsigned long pfn, extra_pfn_end;
    struct page *page;

    if (!xen_pv_domain())
        return -ENODEV;

    pr_info("xen_balloon: Initialising balloon driver.\n");

    balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
    balloon_stats.target_pages = balloon_stats.current_pages;
    balloon_stats.balloon_low = 0;
    balloon_stats.balloon_high = 0;

    init_timer(&balloon_timer);
    balloon_timer.data = 0;
    balloon_timer.function = balloon_alarm;

    register_balloon(&balloon_sysdev);

    /*
     * Initialise the balloon with excess memory space. We need
     * to make sure we don't add memory which doesn't exist or
     * logically exist. The E820 map can be trimmed to be smaller
     * than the amount of physical memory due to the mem= command
     * line parameter. And if this is a 32-bit non-HIGHMEM kernel
     * on a system with memory which requires highmem to access,
     * don't try to use it.
     */
    extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
                        (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
    for (pfn = PFN_UP(xen_extra_mem_start); pfn < extra_pfn_end; pfn++) {
        page = pfn_to_page(pfn);
        /*
         * totalram_pages and totalhigh_pages do not include the
         * boot-time balloon extension, so don't subtract from it.
         */
        __balloon_append(page);
    }

    target_watch.callback = watch_target;
    xenstore_notifier.notifier_call = balloon_init_watcher;
    register_xenstore_notifier(&xenstore_notifier);

    return 0;
}
Example 2: hwpoison_inject
static int hwpoison_inject(void *data, u64 val)
{
    unsigned long pfn = val;
    struct page *p;
    struct page *hpage;
    int err;

    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    if (!pfn_valid(pfn))
        return -ENXIO;

    p = pfn_to_page(pfn);
    hpage = compound_head(p);

    /*
     * This implies unable to support free buddy pages.
     */
    if (!get_page_unless_zero(hpage))
        return 0;

    if (!hwpoison_filter_enable)
        goto inject;

    if (!PageLRU(p) && !PageHuge(p))
        shake_page(p, 0);
    /*
     * This implies unable to support non-LRU pages.
     */
    if (!PageLRU(p) && !PageHuge(p))
        return 0;

    /*
     * do a racy check with elevated page count, to make sure PG_hwpoison
     * will only be set for the targeted owner (or on a free page).
     * We temporarily take page lock for try_get_mem_cgroup_from_page().
     * memory_failure() will redo the check reliably inside page lock.
     */
    lock_page(hpage);
    err = hwpoison_filter(hpage);
    unlock_page(hpage);
    if (err)
        return 0;

inject:
    printk(KERN_INFO "Injecting memory failure at pfn %lx\n", pfn);
    return memory_failure(pfn, 18, MF_COUNT_INCREASED);
}
Example 3: dma_generic_free_coherent
void dma_generic_free_coherent(struct device *dev, size_t size,
                               void *vaddr, dma_addr_t dma_handle,
                               unsigned long attrs)
{
    int order = get_order(size);
    unsigned long pfn = dma_handle >> PAGE_SHIFT;
    int k;

    if (!WARN_ON(!dev))
        pfn += dev->dma_pfn_offset;

    for (k = 0; k < (1 << order); k++)
        __free_pages(pfn_to_page(pfn + k), 0);

    iounmap(vaddr);
}
Example 4: kbase_sync_to_cpu
void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz)
{
#ifdef CONFIG_ARM
    __cpuc_flush_dcache_area(vaddr, sz);
    outer_flush_range(paddr, paddr + sz);
#elif defined(CONFIG_ARM64)
    /* FIXME (MID64-46): There's no other suitable cache flush function for ARM64 */
    flush_cache_all();
#elif defined(CONFIG_X86)
    struct scatterlist scl = { 0, };

    sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz,
                paddr & (PAGE_SIZE - 1));
    dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
#else
#error Implement cache maintenance for your architecture here
#endif
}
Example 5: filemap_sync_pte
/*
 * Called with mm->page_table_lock held to protect against other
 * threads/the swapper from ripping pte's out from under us.
 */
static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
                            unsigned long address, unsigned int flags)
{
    pte_t pte = *ptep;
    unsigned long pfn = pte_pfn(pte);
    struct page *page;

    if (pte_present(pte) && pfn_valid(pfn)) {
        page = pfn_to_page(pfn);
        if (!PageReserved(page) &&
            (ptep_clear_flush_dirty(vma, address, ptep) ||
             page_test_and_clear_dirty(page)))
            set_page_dirty(page);
    }

    return 0;
}
Example 6: ptep_get_and_clear
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
    pte_t pte = *ptep;

    pte_clear(mm, addr, ptep);
    if (!pte_not_present(pte)) {
        unsigned long pfn = pte_pfn(pte);
        if (pfn_valid(pfn)) {
            struct page *page = pfn_to_page(pfn);
            struct address_space *mapping = page_mapping(page);
            if (!mapping || !mapping_writably_mapped(mapping))
                __clear_bit(PG_mapped, &page->flags);
        }
    }
    return pte;
}
Example 7: __update_cache
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
    struct page *page;
    unsigned long pfn = pte_pfn(pte);

    if (!boot_cpu_data.dcache.n_aliases)
        return;

    page = pfn_to_page(pfn);
    if (pfn_valid(pfn)) {
        int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
        if (dirty)
            __flush_purge_region(page_address(page), PAGE_SIZE);
    }
}
Example 8: balloon_init
static int __init balloon_init(void)
{
    unsigned long pfn, num_physpages, max_pfn;
    struct page *page;

    if (!xen_domain())
        return -ENODEV;

    pr_info("xen_balloon: Initialising balloon driver.\n");

    num_physpages = get_num_physpages();
    if (xen_pv_domain())
        max_pfn = xen_start_info->nr_pages;
    else
        max_pfn = num_physpages;

    balloon_stats.current_pages = min(num_physpages, max_pfn);
    totalram_bias = balloon_stats.current_pages - totalram_pages;
    old_totalram_pages = totalram_pages;
    balloon_stats.target_pages = balloon_stats.current_pages;
    balloon_stats.balloon_low = 0;
    balloon_stats.balloon_high = 0;
    balloon_stats.driver_pages = 0UL;

    pr_info("current_pages=%luKB, totalram_pages=%luKB, totalram_bias=%luKB\n",
            balloon_stats.current_pages * 4, totalram_pages * 4, totalram_bias * 4);

    init_timer(&balloon_timer);
    balloon_timer.data = 0;
    balloon_timer.function = balloon_alarm;

    register_balloon(&balloon_sysdev);

    /* Initialise the balloon with excess memory space. */
#ifdef CONFIG_PVM
    for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
        page = pfn_to_page(pfn);
        if (!PageReserved(page))
            balloon_append(page);
    }
#endif

    target_watch.callback = watch_target;
    xenstore_notifier.notifier_call = balloon_init_watcher;
    register_xenstore_notifier(&xenstore_notifier);

    return 0;
}
Example 9: __test_page_isolated_in_pageblock
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
    struct page *page;

    while (pfn < end_pfn) {
        if (!pfn_valid_within(pfn)) {
            pfn++;
            continue;
        }
        page = pfn_to_page(pfn);
        if (PageBuddy(page)) {
            /*
             * If a race between isolation and allocation happens,
             * some free pages could be on the MIGRATE_MOVABLE list
             * although the pageblock's migrate type is
             * MIGRATE_ISOLATE. Catch it and move the page onto the
             * MIGRATE_ISOLATE list.
             */
            if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
                struct page *end_page;

                end_page = page + (1 << page_order(page)) - 1;
                move_freepages(page_zone(page), page, end_page,
                               MIGRATE_ISOLATE);
            }
            pfn += 1 << page_order(page);
        } else if (page_count(page) == 0 &&
                   get_freepage_migratetype(page) == MIGRATE_ISOLATE) {
            pfn += 1;
        } else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
            /*
             * The HWPoisoned page may not be in the buddy
             * system, and page_count() is not 0.
             */
            pfn++;
            continue;
        } else {
            break;
        }
    }
    if (pfn < end_pfn)
        return 0;
    return 1;
}
Example 10: pseries_remove_memblock
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
    unsigned long start, start_pfn;
    struct zone *zone;
    int ret;

    start_pfn = base >> PAGE_SHIFT;

    if (!pfn_valid(start_pfn)) {
        memblock_remove(base, memblock_size);
        return 0;
    }

    zone = page_zone(pfn_to_page(start_pfn));

    /*
     * Remove section mappings and sysfs entries for the
     * section of the memory we are removing.
     *
     * NOTE: Ideally, this should be done in generic code like
     * remove_memory(). But remove_memory() gets called by writing
     * to sysfs "state" file and we can't remove sysfs entries
     * while writing to it. So we have to defer it to here.
     */
    ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
    if (ret)
        return ret;

    /*
     * Update memory regions for memory remove
     */
    memblock_remove(base, memblock_size);

    /*
     * Remove htab bolted mappings for this section of memory
     */
    start = (unsigned long)__va(base);
    ret = remove_section_mapping(start, start + memblock_size);

    /* Ensure all vmalloc mappings are flushed in case they also
     * hit that section of memory
     */
    vm_unmap_aliases();

    return ret;
}
Example 11: store_soft_offline_page
/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
    int ret;
    u64 pfn;

    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;
    if (kstrtoull(buf, 0, &pfn) < 0)
        return -EINVAL;
    pfn >>= PAGE_SHIFT;
    if (!pfn_valid(pfn))
        return -ENXIO;
    ret = soft_offline_page(pfn_to_page(pfn), 0);
    return ret == 0 ? count : ret;
}
Example 12: __dma_unmap_page
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (basically return things back to the way they
 * should be)
 */
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction dir)
{
    struct safe_buffer *buf;

    dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
            __func__, dma_addr, size, dir);

    buf = find_safe_buffer_dev(dev, dma_addr, __func__);
    if (!buf) {
        __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
                              dma_addr & ~PAGE_MASK, size, dir);
        return;
    }

    unmap_single(dev, buf, size, dir);
}
Example 13: pme_mem_fops_fault
/* New fault method instead of nopage */
static int pme_mem_fops_fault(struct vm_area_struct *vma,
                              struct vm_fault *vmf)
{
    struct page *pageptr;
    unsigned long offset, physaddr, pageframe;
    struct pme_fb_vma *mem_node = vma->vm_private_data;
    int index = 0;

    if (!mem_node)
        return -1;

    if (mem_node->type == fb_phys_mapped) {
        /* Memory is mapped using the physical address method */
        offset = vma->vm_pgoff << PAGE_SHIFT;
        physaddr = (unsigned long)vmf->virtual_address - vma->vm_start +
                   offset;
        pageframe = physaddr >> PAGE_SHIFT;
        pageptr = pfn_to_page(pageframe);
    } else {
Example 14: save_highmem_zone
static int save_highmem_zone(struct zone *zone)
{
    unsigned long zone_pfn;

    mark_free_pages(zone);
    for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
        struct page *page;
        struct highmem_page *save;
        void *kaddr;
        unsigned long pfn = zone_pfn + zone->zone_start_pfn;

        if (!(pfn % 1000))
            printk(".");
        if (!pfn_valid(pfn))
            continue;
        page = pfn_to_page(pfn);
        /*
         * This condition results from rvmalloc() sans vmalloc_32()
         * and architectural memory reservations. This should be
         * corrected eventually when the cases giving rise to this
         * are better understood.
         */
        if (PageReserved(page)) {
            printk("highmem reserved page?!\n");
            continue;
        }
        BUG_ON(PageNosave(page));
        if (PageNosaveFree(page))
            continue;
        save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
        if (!save)
            return -ENOMEM;
        save->next = highmem_copy;
        save->page = page;
        save->data = (void *)get_zeroed_page(GFP_ATOMIC);
        if (!save->data) {
            kfree(save);
            return -ENOMEM;
        }
        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(save->data, kaddr, PAGE_SIZE);
        kunmap_atomic(kaddr, KM_USER0);
        highmem_copy = save;
    }
    return 0;
}
Example 15: swsusp_free
void swsusp_free(void)
{
    struct zone *zone;
    unsigned long zone_pfn;

    for_each_zone(zone) {
        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
            if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
                struct page *page;

                page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
                if (PageNosave(page) && PageNosaveFree(page)) {
                    ClearPageNosave(page);
                    ClearPageNosaveFree(page);
                    free_page((long)page_address(page));
                }
            }
    }
}