This article collects typical usage examples of the pgprot_val function in C/C++ code (in the Linux kernel, pgprot_val is actually a macro). If you have been wondering what pgprot_val does, how to call it, or where to find real-world callers, the curated examples here should help.
The following 15 code examples are ordered by popularity.
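Before the examples, a minimal sketch of the underlying pattern: pgprot_t is an opaque wrapper around the raw page-protection bits, pgprot_val() unwraps it into an integer, and __pgprot() wraps a raw value back up. The sketch below assumes an x86 build, where the write-permission bit is spelled _PAGE_RW; other architectures name their bits differently.

#include <asm/pgtable.h>        /* pgprot_t, pgprot_val(), __pgprot() */

/* Return a copy of 'prot' with the write bit set (x86 flag name). */
static pgprot_t make_writable(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) | _PAGE_RW);
}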
Example 1: rtR0MemObjLinuxVMap
/**
* Maps the allocation into ring-0.
*
* This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
*
* Contiguous mappings that aren't in 'high' memory will already be mapped into kernel
* space, so we'll use that mapping if possible. If execute access is required, we'll
* play it safe and do our own mapping.
*
* @returns IPRT status code.
* @param pMemLnx The linux memory object to map.
* @param fExecutable Whether execute access is required.
*/
static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
{
int rc = VINF_SUCCESS;
/*
* Choose mapping strategy.
*/
bool fMustMap = fExecutable
|| !pMemLnx->fContiguous;
if (!fMustMap)
{
size_t iPage = pMemLnx->cPages;
while (iPage-- > 0)
if (PageHighMem(pMemLnx->apPages[iPage]))
{
fMustMap = true;
break;
}
}
Assert(!pMemLnx->Core.pv);
Assert(!pMemLnx->fMappedToRing0);
if (fMustMap)
{
/*
* Use vmap - 2.4.22 and later.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
pgprot_t fPg;
pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
# ifdef _PAGE_NX
if (!fExecutable)
pgprot_val(fPg) |= _PAGE_NX;
# endif
# ifdef VM_MAP
pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
# else
pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
# endif
if (pMemLnx->Core.pv)
pMemLnx->fMappedToRing0 = true;
else
rc = VERR_MAP_FAILED;
#else /* < 2.4.22 */
rc = VERR_NOT_SUPPORTED;
#endif
}
else
{
/*
* Use the kernel RAM mapping.
*/
pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
Assert(pMemLnx->Core.pv);
}
return rc;
}
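A note on the idiom above: assigning through pgprot_val(fPg) as an lvalue only compiles where pgprot_val() is defined as a plain member access (or pgprot_t is a bare integer type). A sketch of the more portable spelling of the same initialization, under the same x86 flag assumptions as the original:

pgprot_t fPg = __pgprot(_PAGE_PRESENT | _PAGE_RW);
#ifdef _PAGE_NX
if (!fExecutable)
        fPg = __pgprot(pgprot_val(fPg) | _PAGE_NX);
#endif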
Example 2: safl_mmap
#if LINUX_VERSION_CODE >= 0x020100
static int safl_mmap(struct file * file, struct vm_area_struct * vma)
#else /* reconstructed: pre-2.1 mmap handlers also took the inode */
static int safl_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
#endif
{
unsigned long size;
if (vma->vm_offset != 0)
return -EINVAL;
size = vma->vm_end - vma->vm_start;
if (size > FLASH_SZ)
return -EINVAL;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#if LINUX_VERSION_CODE >= 0x020100
vma->vm_flags |= VM_IO;
#endif
if (remap_page_range(vma->vm_start, flash_addr, size, vma->vm_page_prot))
return -EAGAIN;
#if LINUX_VERSION_CODE < 0x020100
vma->vm_inode = inode;
inode->i_count++;
#endif
return 0;
}
Example 3: pgprot_val
static struct
agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
unsigned long offset,
int size, pgprot_t page_prot)
{
struct agp_segment_priv *seg;
int num_segments, i;
off_t pg_start;
size_t pg_count;
pg_start = offset / 4096;
pg_count = size / 4096;
seg = *(client->segments);
num_segments = client->num_segments;
for (i = 0; i < num_segments; i++) {
if ((seg[i].pg_start == pg_start) &&
(seg[i].pg_count == pg_count) &&
(pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
return seg + i;
}
}
return NULL;
}
Example 4: fb_mmap
static int
fb_mmap(struct inode *inode, struct file *file, struct vm_area_struct * vma)
{
struct fb_ops *fb = registered_fb[GET_FB_IDX(inode->i_rdev)];
struct fb_fix_screeninfo fix;
if (! fb)
return -ENODEV;
fb->fb_get_fix(&fix, PROC_CONSOLE());
if ((vma->vm_end - vma->vm_start + vma->vm_offset) > fix.smem_len)
return -EINVAL;
vma->vm_offset += fix.smem_start;
if (vma->vm_offset & ~PAGE_MASK)
return -ENXIO;
if (m68k_is040or060) {
pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
/* Use write-through cache mode */
pgprot_val(vma->vm_page_prot) |= _PAGE_CACHE040W;
}
if (remap_page_range(vma->vm_start, vma->vm_offset,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_inode = inode;
inode->i_count++;
return 0;
}
Example 5: pci_mmap_legacy_page_range
/**
* pci_mmap_legacy_page_range - map legacy memory space to userland
* @bus: bus whose legacy space we're mapping
* @vma: vma passed in by mmap
*
* Map legacy memory space for this device back to userspace using a machine
* vector to get the base address.
*/
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start;
pgprot_t prot;
char *addr;
/*
* Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
* for more details.
*/
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
return -EINVAL;
prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
vma->vm_page_prot);
if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
return -EINVAL;
addr = pci_get_legacy_mem(bus);
if (IS_ERR(addr))
return PTR_ERR(addr);
vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
vma->vm_page_prot = prot;
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
size, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
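The pgprot_val() comparison in this example is the standard way to test two protection values for equality: pgprot_t may be a struct, so it cannot be compared with == directly. A hypothetical helper (not an existing kernel API) that spells out the check:

/* Hypothetical helper: compare two pgprot_t values bit-for-bit. */
static inline bool pgprot_equal(pgprot_t a, pgprot_t b)
{
        return pgprot_val(a) == pgprot_val(b);
}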
Example 6: set_up_temporary_text_mapping
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_base + pgd_index(restore_jump_address);
pmd = resume_one_md_table_init(pgd);
if (!pmd)
return -ENOMEM;
if (boot_cpu_has(X86_FEATURE_PSE)) {
set_pmd(pmd + pmd_index(restore_jump_address),
__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
} else {
pte = resume_one_page_table_init(pmd);
if (!pte)
return -ENOMEM;
set_pte(pte + pte_index(restore_jump_address),
__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
}
return 0;
}
Example 7: create_mapping_protection
/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
* set. Also take the new (optional) RO/XP bits into account.
*/
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
u64 attr = md->attribute;
u32 type = md->type;
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;
if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
"UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
/*
* If the region is not aligned to the page size of the OS, we
* can not use strict permissions, since that would also affect
* the mapping attributes of the adjacent regions.
*/
return pgprot_val(PAGE_KERNEL_EXEC);
/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
(EFI_MEMORY_XP | EFI_MEMORY_RO))
return pgprot_val(PAGE_KERNEL_RO);
/* R-X */
if (attr & EFI_MEMORY_RO)
return pgprot_val(PAGE_KERNEL_ROX);
/* RW- */
if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
return pgprot_val(PAGE_KERNEL);
/* RWX */
return pgprot_val(PAGE_KERNEL_EXEC);
}
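Because create_mapping_protection() returns a raw pteval_t rather than a pgprot_t, a caller must wrap the result with __pgprot() before creating the mapping. A sketch of the calling pattern (the create_pgd_mapping() call is abbreviated and its exact signature varies by kernel version):

pgprot_t prot = __pgprot(create_mapping_protection(md));
/* ...then hand 'prot' to the arch mapping routine, e.g.
 * create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
 *                    md->num_pages << EFI_PAGE_SHIFT, prot, false);
 */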
Example 8: change_memory_common
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
struct vm_struct *area;
int i;
if (!PAGE_ALIGNED(addr)) {
start &= PAGE_MASK;
end = start + size;
WARN_ON_ONCE(1);
}
/*
* Kernel VA mappings are always live, and splitting live section
* mappings into page mappings may cause TLB conflicts. This means
* we have to ensure that changing the permission bits of the range
* we are operating on does not result in such splitting.
*
* Let's restrict ourselves to mappings created by vmalloc (or vmap).
* Those are guaranteed to consist entirely of page mappings, and
* splitting is never needed.
*
* So check whether the [addr, addr + size) interval is entirely
* covered by precisely one VM area that has the VM_ALLOC flag set.
*/
area = find_vm_area((void *)addr);
if (!area ||
end > (unsigned long)area->addr + area->size ||
!(area->flags & VM_ALLOC))
return -EINVAL;
if (!numpages)
return 0;
/*
* If we are manipulating read-only permissions, apply the same
* change to the linear mapping of the pages that back this VM area.
*/
if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
pgprot_val(clear_mask) == PTE_RDONLY)) {
for (i = 0; i < area->nr_pages; i++) {
__change_memory_common((u64)page_address(area->pages[i]),
PAGE_SIZE, set_mask, clear_mask);
}
}
/*
* Get rid of potentially aliasing lazily unmapped vm areas that may
* have permissions set that deviate from the ones we are setting here.
*/
vm_unmap_aliases();
return __change_memory_common(start, size, set_mask, clear_mask);
}
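This set-mask/clear-mask interface is what arm64 builds its set_memory_*() helpers on top of. For example, the read-only variant passes PTE_RDONLY as the bits to set and PTE_WRITE as the bits to clear:

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(PTE_RDONLY),  /* bits to set */
                                    __pgprot(PTE_WRITE));  /* bits to clear */
}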
Example 9: note_page
/*
* This function gets called on a break in a continuous series
* of PTE entries; the next one is different so we need to
* print what we collected so far.
*/
static void note_page(struct seq_file *m, struct pg_state *st,
pgprot_t new_prot, int level)
{
pgprotval_t prot, cur;
static const char units[] = "KMGTPE";
/*
* If we have a "break" in the series, we need to flush the state that
* we have now. "break" is either changing perms, levels or
* address space marker.
*/
prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;
if (!st->level) {
/* First entry */
st->current_prot = new_prot;
st->level = level;
st->marker = address_markers;
seq_printf(m, "---[ %s ]---\n", st->marker->name);
} else if (prot != cur || level != st->level ||
st->current_address >= st->marker[1].start_address) {
const char *unit = units;
unsigned long delta;
int width = sizeof(unsigned long) * 2;
/*
* Now print the actual finished series
*/
seq_printf(m, "0x%0*lx-0x%0*lx ",
width, st->start_address,
width, st->current_address);
delta = (st->current_address - st->start_address) >> 10;
while (!(delta & 1023) && unit[1]) {
delta >>= 10;
unit++;
}
seq_printf(m, "%9lu%c ", delta, *unit);
printk_prot(m, st->current_prot, st->level);
/*
* We print markers for special areas of address space,
* such as the start of vmalloc space etc.
* This helps in the interpretation.
*/
if (st->current_address >= st->marker[1].start_address) {
st->marker++;
seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
st->start_address = st->current_address;
st->current_prot = new_prot;
st->level = level;
}
}
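For context, note_page() is driven by a page-table walker that calls it once per entry; runs of entries with identical protections are coalesced into a single printed range. A simplified sketch of the PTE-level caller (pte_pgprot() and PTE_LEVEL_MULT are x86-specific, and the real walker also normalizes addresses):

static void walk_pte_level(struct seq_file *m, struct pg_state *st,
                           pmd_t addr, unsigned long P)
{
        pte_t *pte = (pte_t *)pmd_page_vaddr(addr);
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
                st->current_address = P + i * PTE_LEVEL_MULT;
                note_page(m, st, pte_pgprot(*pte), 4); /* 4 == PTE level */
        }
}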
Example 10: cramfs_mmap
static int cramfs_mmap(struct file *file, struct vm_area_struct *vma)
{
unsigned long address, length;
struct inode *inode = file->f_dentry->d_inode;
struct super_block *sb = inode->i_sb;
/* this is only used in the case of read-only maps for XIP */
if (vma->vm_flags & VM_WRITE)
return generic_file_mmap(file, vma);
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
address = PAGE_ALIGN(sb->CRAMFS_SB_LINEAR_PHYS_ADDR + OFFSET(inode));
address += vma->vm_pgoff << PAGE_SHIFT;
length = vma->vm_end - vma->vm_start;
if (length > inode->i_size)
length = inode->i_size;
length = PAGE_ALIGN(length);
#if 0
/* Doing the following makes it slower and more broken. bdl */
/*
* Accessing memory above the top of memory the kernel knows about or
* through a file pointer that was marked O_SYNC will be
* done non-cached.
*/
vma->vm_page_prot =
__pgprot((pgprot_val(vma->vm_page_prot) & ~_CACHE_MASK)
| _CACHE_UNCACHED);
#endif
/*
* Don't dump addresses that are not real memory to a core file.
*/
vma->vm_flags |= VM_IO;
flush_tlb_page(vma, address);
if (remap_page_range(vma->vm_start, address, length,
vma->vm_page_prot))
return -EAGAIN;
#ifdef DEBUG_CRAMFS_XIP
printk("cramfs_mmap: mapped %s at 0x%08lx, length %lu to vma 0x%08lx"
", page_prot 0x%08lx\n",
file->f_dentry->d_name.name, address, length,
vma->vm_start, pgprot_val(vma->vm_page_prot));
#endif
return 0;
}
Example 11: io_remap_pte_range
/* Remap IO memory, the same way as remap_pfn_range(), but use
* the obio memory space.
*
* These mappings use a pgprot that sets PAGE_IO and does not check the
* mem_map table as this is independent of normal memory.
*/
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
unsigned long address,
unsigned long size,
unsigned long offset, pgprot_t prot,
int space)
{
unsigned long end;
/* clear hack bit that was used as a write_combine side-effect flag */
offset &= ~0x1UL;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
pte_t entry;
unsigned long curend = address + PAGE_SIZE;
entry = mk_pte_io(offset, prot, space);
if (!(address & 0xffff)) {
if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
entry = mk_pte_io(offset,
__pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
space);
curend = address + 0x400000;
offset += 0x400000;
} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
entry = mk_pte_io(offset,
__pgprot(pgprot_val (prot) | _PAGE_SZ512K),
space);
curend = address + 0x80000;
offset += 0x80000;
} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
entry = mk_pte_io(offset,
__pgprot(pgprot_val (prot) | _PAGE_SZ64K),
space);
curend = address + 0x10000;
offset += 0x10000;
} else
offset += PAGE_SIZE;
} else
offset += PAGE_SIZE;
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, address, pte, entry);
address += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
pte++;
} while (address < curend);
} while (address < end);
}
Example 12: setup_areas
static int __init setup_areas(struct spu *spu)
{
struct table {char* name; unsigned long addr; unsigned long size;};
unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));
spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
sizeof(struct spe_shadow),
shadow_flags);
if (!spu_pdata(spu)->shadow) {
pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
goto fail_ioremap;
}
spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
LS_SIZE, pgprot_val(pgprot_noncached_wc(__pgprot(0))));
if (!spu->local_store) {
pr_debug("%s:%d: ioremap local_store failed\n",
__func__, __LINE__);
goto fail_ioremap;
}
spu->problem = ioremap(spu->problem_phys,
sizeof(struct spu_problem));
if (!spu->problem) {
pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
goto fail_ioremap;
}
spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
sizeof(struct spu_priv2));
if (!spu->priv2) {
pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
goto fail_ioremap;
}
dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
spu->problem_phys, spu->local_store_phys,
spu_pdata(spu)->shadow_addr);
dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
(unsigned long)spu->problem, (unsigned long)spu->local_store,
(unsigned long)spu_pdata(spu)->shadow);
return 0;
fail_ioremap:
spu_unmap(spu);
return -ENOMEM;
}
Example 13: __change_page_attr
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
pte_t *kpte;
unsigned long address;
struct page *kpte_page;
BUG_ON(PageHighMem(page));
address = (unsigned long)page_address(page);
kpte = lookup_address(address);
if (!kpte)
return -EINVAL;
kpte_page = virt_to_page(kpte);
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
set_pte_atomic(kpte, mk_pte(page, prot));
} else {
pgprot_t ref_prot;
struct page *split;
ref_prot =
((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
? PAGE_KERNEL_EXEC : PAGE_KERNEL;
split = split_large_page(address, prot, ref_prot);
if (!split)
return -ENOMEM;
set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
kpte_page = split;
}
page_private(kpte_page)++;
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
BUG_ON(page_private(kpte_page) == 0);
page_private(kpte_page)--;
} else
BUG();
/*
* If the pte was reserved, it means it was created at boot
* time (not via split_large_page) and in turn we must not
* replace it with a largepage.
*/
if (!PageReserved(kpte_page)) {
if (cpu_has_pse && (page_private(kpte_page) == 0)) {
ClearPagePrivate(kpte_page);
list_add(&kpte_page->lru, &df_list);
revert_page(kpte_page, address);
}
}
return 0;
}
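__change_page_attr() defers the actual TLB maintenance: reverted large pages are queued on df_list and flushed later. In the old i386 CPA API this meant pairing the attribute change with an explicit global flush; a sketch of a typical call site:

/* Make one kernel page read-only, then flush (old i386 CPA API). */
change_page_attr(page, 1, PAGE_KERNEL_RO);
global_flush_tlb();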
Example 14: io_remap_pte_range
/* Remap IO memory, the same way as remap_page_range(), but use
* the obio memory space.
*
* These mappings use a pgprot that sets PAGE_IO and does not check the
* mem_map table as this is independent of normal memory.
*
* As a special hack if the lowest bit of offset is set the
* side-effect bit will be turned off. This is used as a
* performance improvement on FFB/AFB. -DaveM
*/
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
unsigned long offset, pgprot_t prot, int space)
{
unsigned long end;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
pte_t oldpage;
pte_t entry;
unsigned long curend = address + PAGE_SIZE;
entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
if (!(address & 0xffff)) {
if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
entry = mk_pte_io((offset & ~(0x1UL)),
__pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
space);
curend = address + 0x400000;
offset += 0x400000;
} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
entry = mk_pte_io((offset & ~(0x1UL)),
__pgprot(pgprot_val (prot) | _PAGE_SZ512K),
space);
curend = address + 0x80000;
offset += 0x80000;
} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
entry = mk_pte_io((offset & ~(0x1UL)),
__pgprot(pgprot_val (prot) | _PAGE_SZ64K),
space);
curend = address + 0x10000;
offset += 0x10000;
} else
offset += PAGE_SIZE;
} else
offset += PAGE_SIZE;
if (offset & 0x1UL)
pte_val(entry) &= ~(_PAGE_E);
do {
oldpage = *pte;
pte_clear(pte);
set_pte(pte, entry);
forget_pte(oldpage);
address += PAGE_SIZE;
pte++;
} while (address < curend);
} while (address < end);
}
Example 15: iomap_atomic_prot_pfn
/*
* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
/*
* For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
* PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
* MTRR is UC or WC. UC_MINUS gets the real intention, of the
* user, which is "WC if the MTRR is WC, UC if you can't do that."
*/
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;
return kmap_atomic_prot_pfn(pfn, type, prot);
}
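A sketch of the calling pattern (KM_USER0 and the two-argument kunmap_atomic() belong to the old km_type fixmap API that this function targets; modern kernels dropped the type argument):

void *vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_WC);
/* ...access the (possibly write-combined) mapping through vaddr... */
kunmap_atomic(vaddr, KM_USER0);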