This article collects typical usage examples of the C++ page_to_phys function. If you have been wondering how exactly the C++ page_to_phys function is used, how to call it, or where to find real examples of it in use, the curated code examples below may help.
A total of 15 code examples of the page_to_phys function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
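Most of the examples below follow the same core pattern: take a struct page (often together with an offset into that page) and use page_to_phys() to obtain the physical address needed for DMA setup or cache maintenance. The snippet below is a minimal sketch of that pattern, not taken from any of the examples; the helper name buf_to_phys_sketch() and the exact header choices are assumptions (page_to_phys() is typically provided by an architecture header such as asm/io.h).

#include <linux/mm.h>   /* struct page */
#include <asm/io.h>     /* page_to_phys() on most architectures */

/* Hypothetical helper: physical address of a buffer that starts
 * `offset` bytes into `page`. */
static phys_addr_t buf_to_phys_sketch(struct page *page, unsigned long offset)
{
    /* physical address of the start of the page ... */
    phys_addr_t paddr = page_to_phys(page);

    /* ... plus the offset of the buffer within that page */
    return paddr + offset;
}

Examples 2, 3, 4, 11 and 15 below are essentially variations of this: the resulting physical address is either handed to the device as a DMA address or used to flush or invalidate the CPU caches for that range.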
Example 1: alloc_pages
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
    struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

    if (!page)
        return NULL;
    page->index = 0;
    if (noexec) {
        struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!shadow) {
            __free_pages(page, ALLOC_ORDER);
            return NULL;
        }
        /* remember the physical address of the shadow table */
        page->index = page_to_phys(shadow);
    }
    return (unsigned long *) page_to_phys(page);
}
Example 2: arc_dma_map_page
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction dir,
        unsigned long attrs)
{
    phys_addr_t paddr = page_to_phys(page) + offset;

    /* do the cache maintenance required for this transfer direction */
    _dma_cache_sync(paddr, size, dir);
    return plat_phys_to_dma(dev, paddr);
}
Example 3: dma_map_page
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size,
        enum dma_data_direction dir)
{
    /* on this platform the DMA handle is simply the physical address */
    dma_addr_t handle = page_to_phys(page) + offset;

    dma_sync_single_for_device(dev, handle, size, dir);
    return handle;
}
Example 4: tegra114_flush_dcache
static void tegra114_flush_dcache(struct page *page, unsigned long offset,
        size_t size)
{
    phys_addr_t phys = page_to_phys(page) + offset;
    void *virt = page_address(page) + offset;

    /* flush the inner cache by virtual address, the outer cache by physical range */
    __cpuc_flush_dcache_area(virt, size);
    outer_flush_range(phys, phys + size);
}
Example 5: kvm_kaddr_to_phys
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
    if (!is_vmalloc_addr(kaddr)) {
        /* linear-mapped address: the direct translation is enough */
        BUG_ON(!virt_addr_valid(kaddr));
        return __pa(kaddr);
    } else {
        /* vmalloc address: find the backing page, then add the offset within it */
        return page_to_phys(vmalloc_to_page(kaddr)) +
               offset_in_page(kaddr);
    }
}
Example 6: PAGE_ALIGN
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_addr, gfp_t flag,
        struct dma_attrs *attrs)
{
    unsigned long dma_mask;
    struct page *page;
    unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
    dma_addr_t addr;

    dma_mask = dma_alloc_coherent_mask(dev, flag);

    flag &= ~__GFP_ZERO;
again:
    page = NULL;
    /* CMA can be used only in the context which permits sleeping */
    if (flag & __GFP_WAIT) {
        page = dma_alloc_from_contiguous(dev, count, get_order(size));
        /* the CMA allocation must still fit below the device's DMA mask */
        if (page && page_to_phys(page) + size > dma_mask) {
            dma_release_from_contiguous(dev, page, count);
            page = NULL;
        }
    }
    /* fallback */
    if (!page)
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
    if (!page)
        return NULL;

    addr = page_to_phys(page);
    if (addr + size > dma_mask) {
        __free_pages(page, get_order(size));

        /* retry from ZONE_DMA if the buffer landed above the DMA mask */
        if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
            flag = (flag & ~GFP_DMA32) | GFP_DMA;
            goto again;
        }

        return NULL;
    }
    memset(page_address(page), 0, size);
    *dma_addr = addr;
    return page_address(page);
}
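For context, a driver normally does not call dma_generic_alloc_coherent() directly; it goes through the generic dma_alloc_coherent() wrapper, which dispatches to the platform's dma_ops. The following caller is a hypothetical sketch and is not part of the example above; the PAGE_SIZE buffer size and the origin of `dev` are assumptions made for illustration.

#include <linux/dma-mapping.h>

/* Hypothetical caller: allocate and release one coherent DMA buffer. */
static int example_alloc_dma_buffer(struct device *dev)
{
    dma_addr_t dma_handle;
    void *cpu_addr;

    /* Returns a CPU virtual address and fills in the matching DMA handle;
     * on x86 this can end up in dma_generic_alloc_coherent() above. */
    cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
    if (!cpu_addr)
        return -ENOMEM;

    /* ... program dma_handle into the device, access the buffer via cpu_addr ... */

    dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
    return 0;
}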
Example 7: alloc_pages
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
    struct page *page = alloc_pages(pool->gfp_mask, pool->order);

    if (!page)
        return NULL;
    /* clean the CPU caches so the device sees the freshly allocated pages */
    dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
            PAGE_SIZE << pool->order, DMA_BIDIRECTIONAL);
    return page;
}
Example 8: kmap_invalidate_coherent
static inline void kmap_invalidate_coherent(struct page *page,
        unsigned long vaddr)
{
    /* nothing to do if the kernel mapping shares a cache alias with vaddr */
    if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
        unsigned long kvaddr;

        if (!PageHighMem(page)) {
            kvaddr = (unsigned long)page_to_virt(page);
            __invalidate_dcache_page(kvaddr);
        } else {
            /* highmem page: invalidate through a temporary alias mapping */
            kvaddr = TLBTEMP_BASE_1 +
                (page_to_phys(page) & DCACHE_ALIAS_MASK);
            __invalidate_dcache_page_alias(kvaddr,
                page_to_phys(page));
        }
    }
}
Example 9: __do_machine_kexec
/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
    relocate_kernel_t data_mover;
    struct kimage *image = data;

    /* the relocation routine is invoked via the physical address of its control page */
    data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

    /* Call the moving routine */
    (*data_mover)(&image->head, image->start);
}
Example 10: i460_insert_memory_small_io_page
static int i460_insert_memory_small_io_page (struct agp_memory *mem,
        off_t pg_start, int type)
{
    unsigned long paddr, io_pg_start, io_page_size;
    int i, j, k, num_entries;
    void *temp;

    pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
         mem, pg_start, type, page_to_phys(mem->pages[0]));

    if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
        return -EINVAL;

    io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

    temp = agp_bridge->current_size;
    num_entries = A_SIZE_8(temp)->num_entries;

    if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
        printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
        return -EINVAL;
    }

    /* make sure the target GATT entries are currently free */
    j = io_pg_start;
    while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
        if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
            pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
                 j, RD_GATT(j));
            return -EBUSY;
        }
        j++;
    }

    /* each kernel page is split across I460_IOPAGES_PER_KPAGE GATT entries */
    io_page_size = 1UL << I460_IO_PAGE_SHIFT;
    for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
        paddr = page_to_phys(mem->pages[i]);
        for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
            WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
    }
    WR_FLUSH_GATT(j - 1);
    return 0;
}
Example 11: __mic_dma_map_page
static dma_addr_t
__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
        struct dma_attrs *attrs)
{
    /* convert the page to its kernel virtual address before mapping */
    void *va = phys_to_virt(page_to_phys(page)) + offset;
    struct scif_hw_dev *scdev = dev_get_drvdata(dev);
    struct mic_device *mdev = scdev_to_mdev(scdev);

    return mic_map_single(mdev, va, size);
}
Example 12: alloc_pages
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
    struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

    if (!page)
        return NULL;
    page->index = 0;
    if (noexec) {
        struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!shadow) {
            __free_pages(page, ALLOC_ORDER);
            return NULL;
        }
        page->index = page_to_phys(shadow);
    }
    /* track the new table in the per-mm list before handing it out */
    spin_lock(&mm->page_table_lock);
    list_add(&page->lru, &mm->context.crst_list);
    spin_unlock(&mm->page_table_lock);
    return (unsigned long *) page_to_phys(page);
}
Example 13: flush_cache_page
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
{
    /* use a temporary mapping with the same cache colour as the user address */
    unsigned long phys = page_to_phys(pfn_to_page(pfn));
    unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

    __flush_invalidate_dcache_page_alias(virt, phys);
    __invalidate_icache_page_alias(virt, phys);
}
Example 14: local_flush_cache_page
void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
{
    /* Note that we have to use the 'alias' address to avoid multi-hit */
    unsigned long phys = page_to_phys(pfn_to_page(pfn));
    unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

    __flush_invalidate_dcache_page_alias(virt, phys);
    __invalidate_icache_page_alias(virt, phys);
}
Example 15: pci_direct_map_sg
static int pci_direct_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
        int nents, enum dma_data_direction direction)
{
    int i;

    /* direct mapping: each segment's bus address is just its physical address */
    for (i = 0; i < nents; i++, sg++) {
        sg->dma_address = page_to_phys(sg->page) + sg->offset;
        sg->dma_length = sg->length;
    }
    return nents;
}