This article collects typical usage examples of the C++ phys_to_page function. If you are wondering what phys_to_page does, how to call it, or what real code using it looks like, the hand-picked examples below should help.
The following shows 15 code examples of phys_to_page, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
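The pattern behind most of the examples below is the same: phys_to_page() turns a physical address into its struct page, which can then be mapped and accessed through normal kernel APIs. Here is a minimal sketch of that idiom; the helper name is hypothetical, and it assumes an architecture such as ARM where phys_to_page() is provided by asm/memory.h and uses the newer single-argument kmap_atomic() (Examples 6 and 8 below use the older two-argument kmap_atomic(pg, KM_IRQ0) form).

#include <linux/mm.h>
#include <linux/highmem.h>      /* kmap_atomic()/kunmap_atomic() */
#include <linux/string.h>       /* memset() */

/* Hypothetical helper: zero one page given only its physical address. */
static void zero_phys_page(phys_addr_t phys_addr)
{
    struct page *page = phys_to_page(phys_addr);   /* phys addr -> struct page */
    void *vaddr = kmap_atomic(page);               /* short-lived kernel mapping */

    memset(vaddr, 0, PAGE_SIZE);
    kunmap_atomic(vaddr);
}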
Example 1: balong_ion_free_mem_to_buddy
STATIC int balong_ion_free_mem_to_buddy(void)
{
    int i;
    u32 fb_heap_phy = 0;
    struct ion_heap_info_data mem_data;

    if (0 != hisi_ion_get_heap_info(ION_FB_HEAP_ID, &mem_data)) {
        balongfb_loge("fail to get ION_FB_HEAP_ID\n");
        return -EINVAL;
    }
    if (0 == mem_data.heap_size) {
        balongfb_loge("fb reserved size 0\n");
        return -EINVAL;
    }

    fb_heap_phy = mem_data.heap_phy;
    for (i = 0; i < ((mem_data.heap_size) / PAGE_SIZE); i++) {
        free_reserved_page(phys_to_page(mem_data.heap_phy));
#ifdef CONFIG_HIGHMEM
        if (PageHighMem(phys_to_page(mem_data.heap_phy)))
            totalhigh_pages += 1;
#endif
        mem_data.heap_phy += PAGE_SIZE;
    }
    memblock_free(fb_heap_phy, mem_data.heap_size);
    return 0;
}
Example 2: bitfix_xor_page
static void bitfix_xor_page(phys_addr_t page_addr, u32 dest_cu)
{
    phys_addr_t dest_page_addr = (page_addr & ~CU_MASK) |
        (dest_cu << CU_OFFSET);
    u32 *virt_page = kmap_atomic(phys_to_page(page_addr));
    u32 *virt_dest_page = kmap_atomic(phys_to_page(dest_page_addr));

    BUG_ON(page_addr & ~PAGE_MASK);
    BUG_ON(dest_page_addr == page_addr);

    bitfix_xor32(virt_dest_page, virt_page, PAGE_SIZE);

    kunmap_atomic(virt_dest_page);
    kunmap_atomic(virt_page);
}
Example 3: __get_dma_pgprot
void *cma_map_kernel(u32 phys_addr, size_t size)
{
    pgprot_t prot = __get_dma_pgprot(NULL, pgprot_kernel);
    struct page *page = phys_to_page(phys_addr);
    void *ptr = NULL;

    if (unlikely(phys_addr < mem_start || phys_addr > mem_start + mem_size)) {
        pr_err("%s(%d) err: phys_addr 0x%x invalid!\n",
               __func__, __LINE__, phys_addr);
        return NULL;
    }
    /* BUG_ON(unlikely(!pfn_valid(__phys_to_pfn(phys_addr)))); */

    size = PAGE_ALIGN(size);
    if (PageHighMem(page)) {
        ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
                                __builtin_return_address(0));
        if (!ptr) {
            pr_err("%s(%d) err: __dma_alloc_remap failed!\n",
                   __func__, __LINE__);
            return NULL;
        }
    } else {
        __dma_remap(page, size, prot);
        ptr = page_address(page);
    }
    return ptr;
}
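A hypothetical caller would pair this with cma_unmap_kernel() (shown in Example 13 below); the physical address and length in this sketch are made up purely for illustration, and SZ_1M comes from linux/sizes.h.

/* Hypothetical usage sketch: map a reserved CMA region, clear it, unmap it. */
static int cma_clear_region_example(void)
{
    u32 phys = 0x80000000;              /* made-up, page-aligned physical address */
    size_t len = SZ_1M;                 /* made-up length */
    void *vaddr = cma_map_kernel(phys, len);

    if (!vaddr)
        return -ENOMEM;

    memset(vaddr, 0, len);
    cma_unmap_kernel(phys, len, vaddr);
    return 0;
}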
Example 4: PAGE_ALIGN
static inline void *memdump_remap_type(unsigned long phys_addr, size_t size,
                                       pgprot_t pgprot)
{
    int i;
    u8 *vaddr;
    int npages = PAGE_ALIGN((phys_addr & (PAGE_SIZE - 1)) + size) >> PAGE_SHIFT;
    unsigned long offset = phys_addr & (PAGE_SIZE - 1);
    struct page **pages;

    pages = vmalloc(sizeof(struct page *) * npages);
    if (!pages) {
        printk(KERN_ERR "%s: vmalloc return NULL!\n", __func__);
        return NULL;
    }

    pages[0] = phys_to_page(phys_addr);
    for (i = 0; i < npages - 1; i++)
        pages[i + 1] = pages[i] + 1;

    vaddr = (u8 *)vmap(pages, npages, VM_MAP, pgprot);
    if (!vaddr)
        printk(KERN_ERR "%s: vmap return NULL!\n", __func__);
    else
        vaddr += offset;

    vfree(pages);
    return (void *)vaddr;
}
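A caller typically supplies a concrete protection, for instance a non-cached view of a memory-dump region. The address, size, and surrounding function in this sketch are hypothetical; note that the function returns the mapping plus the sub-page offset, so unmapping goes through the page-aligned base.

/* Hypothetical usage sketch: map a dump region non-cached, then unmap it. */
static void memdump_example(void)
{
    void *vaddr = memdump_remap_type(0x9ff00000UL, SZ_64K,
                                     pgprot_noncached(PAGE_KERNEL));

    if (!vaddr)
        return;

    /* ... read the dump through vaddr ... */

    /* vmap() returned a page-aligned address, so mask the offset back off. */
    vunmap((void *)((unsigned long)vaddr & PAGE_MASK));
}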
Example 5: msm_iommu_map_extra
int msm_iommu_map_extra(struct iommu_domain *domain,
                        unsigned long start_iova,
                        unsigned long size,
                        unsigned long page_size,
                        int cached)
{
    int ret = 0;
    int i = 0;
    unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
    unsigned long temp_iova = start_iova;

    if (page_size == SZ_4K) {
        struct scatterlist *sglist;
        unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
        struct page *dummy_page = phys_to_page(phy_addr);

        sglist = kmalloc(sizeof(*sglist) * nrpages, GFP_KERNEL);
        if (!sglist) {
            ret = -ENOMEM;
            goto out;
        }
        sg_init_table(sglist, nrpages);
        for (i = 0; i < nrpages; i++)
            sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

        ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
        if (ret) {
            pr_err("%s: could not map extra %lx in domain %p\n",
                   __func__, start_iova, domain);
        }
        kfree(sglist);
    } else {
        /* ... (remainder of this example is truncated in the original listing) */
Example 6: mon_dmapeek
char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
{
    struct page *pg;
    unsigned long flags;
    unsigned char *map;
    unsigned char *ptr;

    /*
     * On i386, a DMA handle is the "physical" address of a page.
     * In other words, the bus address is equal to physical address.
     * There is no IOMMU.
     */
    pg = phys_to_page(dma_addr);

    /*
     * We are called from hardware IRQs in case of callbacks.
     * But we can be called from softirq or process context in case
     * of submissions. In such case, we need to protect KM_IRQ0.
     */
    local_irq_save(flags);
    map = kmap_atomic(pg, KM_IRQ0);
    ptr = map + (dma_addr & (PAGE_SIZE - 1));
    memcpy(dst, ptr, len);
    kunmap_atomic(map, KM_IRQ0);
    local_irq_restore(flags);
    return 0;
}
Example 7: bitfix_prepare
/**
 * Prepare for running bitfix.
 *
 * This will zero out bitfix memory in preparation for calling
 * bitfix_process_page() on pages. It will also allocate some internal
 * temporary memory that will be freed with bitfix_finish().
 *
 * This should be called each time before suspend.
 *
 * This function must be called before bitfix_does_overlap_reserved().
 */
void bitfix_prepare(void)
{
    int i;

    if (!bitfix_enabled)
        return;

    /*
     * Chunk size must match. Set just in case someone was playing around
     * with sysfs.
     */
    s3c_pm_check_set_chunksize(CHUNK_SIZE);

    /*
     * We'd like pm-check to give us chunks in an order such that we
     * process all chunks with the same destination one right after another.
     */
    s3c_pm_check_set_interleave_bytes(1 << CU_OFFSET);

    /* Zero out the xor superchunk. */
    for (i = 0; i < UPPER_LOOPS; i++) {
        phys_addr_t base_addr = SDRAM_BASE + (i << UPPER_OFFSET);
        phys_addr_t xor_superchunk_addr = base_addr +
            (XOR_CU_NUM << CU_OFFSET);
        u32 pgnum;

        for (pgnum = 0; pgnum < PAGES_PER_SUPERCHUNK; pgnum++) {
            phys_addr_t addr = xor_superchunk_addr +
                (pgnum * PAGE_SIZE);
            void *virt = kmap_atomic(phys_to_page(addr));

            memset(virt, 0, PAGE_SIZE);
            kunmap_atomic(virt);
        }
    }
}
Example 8: mon_dmapeek_vec
void mon_dmapeek_vec(const struct mon_reader_bin *rp,
                     unsigned int offset, dma_addr_t dma_addr, unsigned int length)
{
    unsigned long flags;
    unsigned int step_len;
    struct page *pg;
    unsigned char *map;
    unsigned long page_off, page_len;

    local_irq_save(flags);
    while (length) {
        /* compute number of bytes we are going to copy in this page */
        step_len = length;
        page_off = dma_addr & (PAGE_SIZE - 1);
        page_len = PAGE_SIZE - page_off;
        if (page_len < step_len)
            step_len = page_len;

        /* copy data and advance pointers */
        pg = phys_to_page(dma_addr);
        map = kmap_atomic(pg, KM_IRQ0);
        offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
        kunmap_atomic(map, KM_IRQ0);

        dma_addr += step_len;
        length -= step_len;
    }
    local_irq_restore(flags);
}
Example 9: WARN
static void *__alloc_from_pool(size_t size, struct page **ret_pages, gfp_t flags)
{
    unsigned long val;
    void *ptr = NULL;
    int count = size >> PAGE_SHIFT;
    int i;

    if (!atomic_pool) {
        WARN(1, "coherent pool not initialised!\n");
        return NULL;
    }

    val = gen_pool_alloc(atomic_pool, size);
    if (val) {
        phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

        for (i = 0; i < count; i++) {
            ret_pages[i] = phys_to_page(phys);
            phys += 1 << PAGE_SHIFT;
        }
        ptr = (void *)val;
        memset(ptr, 0, size);
    }
    return ptr;
}
Example 10: num_pages_spanned
struct dma_pinned_list *dma_pin_kernel_iovec_pages(struct iovec *iov, size_t len)
{
    struct dma_pinned_list *local_list;
    struct page **pages;
    int i, j;
    int nr_iovecs = 0;
    int iovec_len_used = 0;
    int iovec_pages_used = 0;

    /* determine how many iovecs/pages there are, up front */
    do {
        iovec_len_used += iov[nr_iovecs].iov_len;
        iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
        nr_iovecs++;
    } while (iovec_len_used < len);

    /* single kmalloc for pinned list, page_list[], and the page arrays */
    local_list = kmalloc(sizeof(*local_list)
                         + (nr_iovecs * sizeof(struct dma_page_list))
                         + (iovec_pages_used * sizeof(struct page *)), GFP_KERNEL);
    if (!local_list)
        goto out;

    /* list of pages starts right after the page list array */
    pages = (struct page **)&local_list->page_list[nr_iovecs];

    local_list->nr_iovecs = 0;
    for (i = 0; i < nr_iovecs; i++) {
        struct dma_page_list *page_list = &local_list->page_list[i];
        int offset;

        len -= iov[i].iov_len;

        if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
            goto unpin;

        page_list->nr_pages = num_pages_spanned(&iov[i]);
        page_list->base_address = iov[i].iov_base;
        page_list->pages = pages;
        pages += page_list->nr_pages;

        for (offset = 0, j = 0; j < page_list->nr_pages; j++, offset += PAGE_SIZE) {
            page_list->pages[j] =
                phys_to_page(__pa((unsigned int)page_list->base_address) + offset);
        }
        local_list->nr_iovecs = i + 1;
    }
    return local_list;

unpin:
    kfree(local_list);
out:
    return NULL;
}
Example 11: tegra_move_framebuffer
/*
 * Due to conflicting restrictions on the placement of the framebuffer,
 * the bootloader is likely to leave the framebuffer pointed at a location
 * in memory that is outside the grhost aperture. This function will move
 * the framebuffer contents from a physical address that is anywhere (lowmem,
 * highmem, or outside the memory map) to a physical address that is outside
 * the memory map.
 */
void tegra_move_framebuffer(unsigned long to, unsigned long from,
                            unsigned long size)
{
    struct page *page;
    void __iomem *to_io;
    void *from_virt;
    unsigned long i;

    BUG_ON(PAGE_ALIGN((unsigned long)to) != (unsigned long)to);
    BUG_ON(PAGE_ALIGN(from) != from);
    BUG_ON(PAGE_ALIGN(size) != size);

    to_io = ioremap(to, size);
    if (!to_io) {
        pr_err("%s: Failed to map target framebuffer\n", __func__);
        return;
    }

    if (pfn_valid(page_to_pfn(phys_to_page(from)))) {
        for (i = 0; i < size; i += PAGE_SIZE) {
            page = phys_to_page(from + i);
            from_virt = kmap(page);
            memcpy(to_io + i, from_virt, PAGE_SIZE);
            kunmap(page);
        }
    } else {
        void __iomem *from_io = ioremap(from, size);

        if (!from_io) {
            pr_err("%s: Failed to map source framebuffer\n",
                   __func__);
            goto out;
        }

        for (i = 0; i < size; i += 4)
            writel(readl(from_io + i), to_io + i);

        iounmap(from_io);
    }

out:
    iounmap(to_io);
}
Example 12: ion_cma_get_sgtable
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size)
{
    struct page *page = phys_to_page((u32)handle);
    int ret;

    ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
    if (unlikely(ret))
        return ret;

    sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
    return 0;
}
Example 13: cma_unmap_kernel
void cma_unmap_kernel(u32 phys_addr, size_t size, void *cpu_addr)
{
    struct page *page = phys_to_page(phys_addr);

    BUG_ON(unlikely(!pfn_valid(__phys_to_pfn(phys_addr))));

    size = PAGE_ALIGN(size);
    if (PageHighMem(page))
        __dma_free_remap(cpu_addr, size);
    else
        __dma_remap(page, size, pgprot_kernel);
}
Example 14: pgprot_noncached
void __iomem *mem_vmap(phys_addr_t pa, size_t size, struct page *pages[])
{
    unsigned int num_pages = (size >> PAGE_SHIFT);
    pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
    int i;

    for (i = 0; i < num_pages; i++) {
        pages[i] = phys_to_page(pa);
        pa += PAGE_SIZE;
    }
    return vmap(pages, num_pages, VM_MAP, prot);
}
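The caller owns the pages[] array here; mem_vmap() only fills it in, and vmap() keeps its own copy of the page pointers, so the array can be freed right after mapping. A hypothetical usage sketch (the address, size, and function name are made up):

/* Hypothetical usage sketch for mem_vmap(): the caller provides the page array. */
static void __iomem *map_region_example(void)
{
    phys_addr_t pa = 0x88000000;            /* made-up, page-aligned address */
    size_t size = SZ_64K;                   /* made-up length */
    unsigned int n = size >> PAGE_SHIFT;
    struct page **pages = kmalloc_array(n, sizeof(*pages), GFP_KERNEL);
    void __iomem *va;

    if (!pages)
        return NULL;

    va = mem_vmap(pa, size, pages);
    kfree(pages);                           /* safe: vmap() copied what it needs */

    return va;                              /* unmap later with vunmap() */
}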
Example 15: ion_cma_get_sgtable
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t handle, size_t size)
{
    struct page *page = phys_to_page(dma_to_phys(dev, handle));
    struct scatterlist *sg;
    int ret;

    ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
    if (unlikely(ret))
        return ret;

    sg = sgt->sgl;
    sg_set_page(sg, page, PAGE_ALIGN(size), 0);
    sg_dma_address(sg) = sg_phys(sg);

    return 0;
}