This article collects typical usage examples of PFN_UP in C/C++ code. If you are wondering what PFN_UP does, how it is used, or would like to see real call sites, the curated examples below should help.
In total, 15 code examples of PFN_UP are shown, ordered by popularity by default.
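Before diving into the examples, here is a minimal sketch of what PFN_UP and its companions typically expand to. These definitions follow include/linux/pfn.h in the Linux kernel, but details vary between versions, so treat them as illustrative rather than authoritative:

#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round a byte address up to a page frame number */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* round a byte address down to a page frame number */
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)	/* convert a page frame number back to a physical address */

A pattern worth watching for below: ranges of memory that may be used take PFN_UP of the start and PFN_DOWN of the end, so only whole pages are counted; ranges that must be covered or reserved take PFN_DOWN of the start and PFN_UP of the end, so partially covered pages are included.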
Example 1: mem_prof_init
static void __init mem_prof_init(void)
{
unsigned long start_pfn, holes, free_pfn;
const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);
unsigned long ul;
mem_prof_t *mp;
/* Node#0 SDRAM */
mp = &mem_prof[0];
mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
mp->pages = PFN_DOWN(memory_end - memory_start);
mp->holes = 0;
mp->free_pfn = PFN_UP(__pa(_end));
/* Node#1 internal SRAM */
mp = &mem_prof[1];
start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START);
holes = 0;
if (start_pfn & (zone_alignment - 1)) {
ul = zone_alignment;
while (start_pfn >= ul)
ul += zone_alignment;
start_pfn = ul - zone_alignment;
holes = free_pfn - start_pfn;
}
mp->start_pfn = start_pfn;
mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes;
mp->holes = holes;
mp->free_pfn = PFN_UP(CONFIG_IRAM_START);
}
Example 2: bootmem_init
void __init bootmem_init(void)
{
/* Reserve all memory below PHYS_OFFSET, as memory
* accounting doesn't work for pages below that address.
*
* If PHYS_OFFSET is zero reserve page at address 0:
* successful allocations should never return NULL.
*/
if (PHYS_OFFSET)
memblock_reserve(0, PHYS_OFFSET);
else
memblock_reserve(0, 1);
early_init_fdt_scan_reserved_mem();
if (!memblock_phys_mem_size())
panic("No memory found!\n");
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = min(max_pfn, MAX_LOW_PFN);
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
memblock_dump_all();
}
Example 3: xen_set_identity_and_release
static unsigned long __init xen_set_identity_and_release(
const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
phys_addr_t start = 0;
unsigned long released = 0;
unsigned long identity = 0;
const struct e820entry *entry;
int i;
int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
/*
* Combine non-RAM regions and gaps until a RAM region (or the
* end of the map) is reached, then set the 1:1 map and
* release the pages (if available) in those non-RAM regions.
*
* The combined non-RAM regions are rounded to a whole number
* of pages so any partial pages are accessible via the 1:1
* mapping. This is needed for some BIOSes that put (for
* example) the DMI tables in a reserved region that begins on
* a non-page boundary.
*/
for (i = 0, entry = list; i < map_size; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
if (entry->type == E820_RAM || i == map_size - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
if (entry->type == E820_RAM)
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) {
if (xlated_phys) {
xen_pvh_adjust_stats(start_pfn,
end_pfn, &released, &identity,
nr_pages);
} else {
xen_set_identity_and_release_chunk(
start_pfn, end_pfn, nr_pages,
&released, &identity);
}
}
start = end;
}
}
if (released)
printk(KERN_INFO "Released %lu pages of unused memory\n", released);
if (identity)
printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
return released;
}
Example 4: find_limits
static void __init find_limits(unsigned long *min, unsigned long *max_low,
unsigned long *max_high)
{
*max_low = PFN_DOWN(memblock_get_current_limit());
*min = PFN_UP(memblock_start_of_DRAM());
*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
Example 5: pcpu_populate_chunk
/**
* pcpu_populate_chunk - populate and map an area of a pcpu_chunk
* @chunk: chunk of interest
* @off: offset to the area to populate
* @size: size of the area to populate in bytes
*
* For each cpu, populate and map pages [@page_start,@page_end) into
* @chunk. The area is cleared on return.
*
* CONTEXT:
* pcpu_alloc_mutex, does GFP_KERNEL allocation.
*/
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
int page_start = PFN_DOWN(off);
int page_end = PFN_UP(off + size);
int free_end = page_start, unmap_end = page_start;
struct page **pages;
unsigned long *populated;
unsigned int cpu;
int rs, re, rc;
/* quick path, check whether all pages are already there */
rs = page_start;
pcpu_next_pop(chunk, &rs, &re, page_end);
if (rs == page_start && re == page_end)
goto clear;
/* need to allocate and map pages, this chunk can't be immutable */
WARN_ON(chunk->immutable);
pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
if (!pages)
return -ENOMEM;
/* alloc and map */
pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
if (rc)
goto err_free;
free_end = re;
}
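	/* Note: the listing stops here; the remainder of pcpu_populate_chunk, including the "clear:" and "err_free:" labels that the gotos above jump to, is not shown. */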
Example 6: setup_physmem
void __init setup_physmem(unsigned long start, unsigned long reserve_end,
unsigned long len, unsigned long long highmem)
{
unsigned long reserve = reserve_end - start;
int pfn = PFN_UP(__pa(reserve_end));
int delta = (len - reserve) >> PAGE_SHIFT;
int err, offset, bootmap_size;
physmem_fd = create_mem_file(len + highmem);
offset = uml_reserved - uml_physmem;
err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
len - offset, 1, 1, 1);
if (err < 0) {
printf("setup_physmem - mapping %ld bytes of memory at 0x%p "
"failed - errno = %d\n", len - offset,
(void *) uml_reserved, err);
exit(1);
}
/*
* Special kludge - This page will be mapped in to userspace processes
* from physmem_fd, so it needs to be written out there.
*/
os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);
bootmap_size = init_bootmem(pfn, pfn + delta);
free_bootmem(__pa(reserve_end) + bootmap_size,
len - bootmap_size - reserve);
}
Example 7: xen_count_remap_pages
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
unsigned long extra = 0;
unsigned long start_pfn, end_pfn;
const struct e820entry *entry = xen_e820_map;
int i;
end_pfn = 0;
for (i = 0; i < xen_e820_map_entries; i++, entry++) {
start_pfn = PFN_DOWN(entry->addr);
/* Adjacent regions on non-page boundaries handling! */
end_pfn = min(end_pfn, start_pfn);
if (start_pfn >= max_pfn)
return extra + max_pfn - end_pfn;
/* Add any holes in map to result. */
extra += start_pfn - end_pfn;
end_pfn = PFN_UP(entry->addr + entry->size);
end_pfn = min(end_pfn, max_pfn);
if (entry->type != E820_RAM)
extra += end_pfn - start_pfn;
}
return extra;
}
Example 8: pcpu_populate_chunk
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
int page_start = PFN_DOWN(off);
int page_end = PFN_UP(off + size);
int free_end = page_start, unmap_end = page_start;
struct page **pages;
unsigned long *populated;
unsigned int cpu;
int rs, re, rc;
rs = page_start;
pcpu_next_pop(chunk, &rs, &re, page_end);
if (rs == page_start && re == page_end)
goto clear;
WARN_ON(chunk->immutable);
pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
if (!pages)
return -ENOMEM;
pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
if (rc)
goto err_free;
free_end = re;
}
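	/* Note: as in Example 5, the listing is cut short; the "clear:" and "err_free:" labels referenced above are not shown. */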
Example 9: xen_find_pfn_range
/*
* Finds the next RAM pfn available in the E820 map after min_pfn.
* This function updates min_pfn with the pfn found and returns
* the size of that range or zero if not found.
*/
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
const struct e820entry *entry = xen_e820_map;
unsigned int i;
unsigned long done = 0;
for (i = 0; i < xen_e820_map_entries; i++, entry++) {
unsigned long s_pfn;
unsigned long e_pfn;
if (entry->type != E820_RAM)
continue;
e_pfn = PFN_DOWN(entry->addr + entry->size);
/* We only care about E820 after this */
if (e_pfn <= *min_pfn)
continue;
s_pfn = PFN_UP(entry->addr);
/* If min_pfn falls within the E820 entry, we want to start
* at the min_pfn PFN.
*/
if (s_pfn <= *min_pfn) {
done = e_pfn - *min_pfn;
} else {
done = e_pfn - s_pfn;
*min_pfn = s_pfn;
}
break;
}
return done;
}
Example 10: pcpu_depopulate_chunk
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
int page_start = PFN_DOWN(off);
int page_end = PFN_UP(off + size);
struct page **pages;
unsigned long *populated;
int rs, re;
rs = page_start;
pcpu_next_unpop(chunk, &rs, &re, page_end);
if (rs == page_start && re == page_end)
return;
WARN_ON(chunk->immutable);
pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
BUG_ON(!pages);
pcpu_pre_unmap_flush(chunk, page_start, page_end);
pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
pcpu_unmap_pages(chunk, pages, populated, rs, re);
pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
pcpu_free_pages(chunk, pages, populated, rs, re);
bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
Example 11: pfn_is_nosave
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
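Note the rounding direction here: every page that overlaps the .nosave section has to be excluded from the hibernation image, so the begin symbol is rounded down and the end symbol is rounded up, which pulls in pages the section only partially covers. As a made-up illustration (4 KiB pages, addresses not from any real System.map): if __nosave_begin were at physical 0x2345600 and __nosave_end at 0x2347800, then nosave_begin_pfn = 0x2345 and nosave_end_pfn = 0x2348, so PFNs 0x2345 through 0x2347 all report as nosave.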
Example 12: tboot_probe
void __init tboot_probe(void)
{
tboot_shared_t *tboot_shared;
unsigned long p_tboot_shared;
uint32_t map_base, map_size;
unsigned long map_addr;
/* Look for valid page-aligned address for shared page. */
p_tboot_shared = simple_strtoul(opt_tboot, NULL, 0);
if ( (p_tboot_shared == 0) || ((p_tboot_shared & ~PAGE_MASK) != 0) )
return;
/* Map and check for tboot UUID. */
set_fixmap(FIX_TBOOT_SHARED_BASE, p_tboot_shared);
tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
if ( tboot_shared == NULL )
return;
if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
return;
/* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
compatible */
if ( tboot_shared->version < 4 ) {
printk("unsupported version of tboot (%u)\n", tboot_shared->version);
return;
}
g_tboot_shared = tboot_shared;
printk("TBOOT: found shared page at phys addr %lx:\n", p_tboot_shared);
printk(" version: %d\n", tboot_shared->version);
printk(" log_addr: 0x%08x\n", tboot_shared->log_addr);
printk(" shutdown_entry: 0x%08x\n", tboot_shared->shutdown_entry);
printk(" tboot_base: 0x%08x\n", tboot_shared->tboot_base);
printk(" tboot_size: 0x%x\n", tboot_shared->tboot_size);
/* these will be needed by tboot_protect_mem_regions() and/or
tboot_parse_dmar_table(), so get them now */
map_base = PFN_DOWN(TXT_PUB_CONFIG_REGS_BASE);
map_size = PFN_UP(NR_TXT_CONFIG_PAGES * PAGE_SIZE);
map_addr = (unsigned long)__va(map_base << PAGE_SHIFT);
if ( map_pages_to_xen(map_addr, map_base, map_size, __PAGE_HYPERVISOR) )
return;
/* TXT Heap */
txt_heap_base =
*(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
txt_heap_size =
*(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);
/* SINIT */
sinit_base =
*(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
sinit_size =
*(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);
destroy_xen_mappings((unsigned long)__va(map_base << PAGE_SHIFT),
(unsigned long)__va((map_base + map_size) << PAGE_SHIFT));
}
Example 13: mem_hole_size
static u64 __init mem_hole_size(u64 start, u64 end)
{
unsigned long start_pfn = PFN_UP(start);
unsigned long end_pfn = PFN_DOWN(end);
if (start_pfn < end_pfn)
return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
return 0;
}
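Here the rounding goes the other way: only pages lying entirely inside the hole are counted, so the start is rounded up and the end rounded down. A small userspace sketch of that arithmetic (illustrative only; PAGE_SHIFT is hard-coded to 12 here, whereas the kernel macros come from <linux/pfn.h>):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	/* Made-up hole boundaries that do not fall on page boundaries. */
	unsigned long start = 0x1800, end = 0x5800;

	/* Only PFNs 2, 3 and 4 lie entirely inside [0x1800, 0x5800). */
	printf("start_pfn=%lu end_pfn=%lu whole_pages=%lu\n",
	       PFN_UP(start), PFN_DOWN(end), PFN_DOWN(end) - PFN_UP(start));
	return 0;
}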
Example 14: init_initrd
/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
unsigned long end;
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
* perform sanity checks and use them if all looks good.
*/
if (!initrd_start || initrd_end <= initrd_start) {
#ifdef CONFIG_PROBE_INITRD_HEADER
u32 *initrd_header;
/*
* See if initrd has been added to the kernel image by
* arch/mips/boot/addinitrd.c. In that case a header is
* prepended to initrd and is made up by 8 bytes. The first
* word is a magic number and the second one is the size of
* initrd. Initrd start must be page aligned in any cases.
*/
initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
if (initrd_header[0] != 0x494E5244)
goto disable;
initrd_start = (unsigned long)(initrd_header + 2);
initrd_end = initrd_start + initrd_header[1];
#else
goto disable;
#endif
}
if (initrd_start & ~PAGE_MASK) {
pr_err("initrd start must be page aligned\n");
goto disable;
}
if (initrd_start < PAGE_OFFSET) {
pr_err("initrd start < PAGE_OFFSET\n");
goto disable;
}
/*
* Sanitize initrd addresses. For example firmware
* can't guess if they need to pass them through
* 64-bits values if the kernel has been built in pure
* 32-bit. We need also to switch from KSEG0 to XKPHYS
* addresses now, so the code can now safely use __pa().
*/
end = __pa(initrd_end);
initrd_end = (unsigned long)__va(end);
initrd_start = (unsigned long)__va(__pa(initrd_start));
ROOT_DEV = Root_RAM0;
return PFN_UP(end);
disable:
initrd_start = 0;
initrd_end = 0;
return 0;
}
Example 15: bootmem_init
static void __init bootmem_init(void)
{
unsigned long start_pfn, bootmap_size;
unsigned long size = initrd_end - initrd_start;
start_pfn = PFN_UP(__pa(&_end));
min_low_pfn = PFN_UP(MEMORY_START);
max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
min_low_pfn, max_low_pfn);
add_active_range(0, min_low_pfn, max_low_pfn);
free_bootmem(PFN_PHYS(start_pfn),
(max_low_pfn - start_pfn) << PAGE_SHIFT);
memory_present(0, start_pfn, max_low_pfn);
/* Reserve space for the bootmem bitmap. */
reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);
if (size == 0) {
printk(KERN_INFO "Initrd not found or empty");
goto disable;
}
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
printk(KERN_ERR "Initrd extends beyond end of memory");
goto disable;
}
/* Reserve space for the initrd bitmap. */
reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
initrd_start, size);
return;
disable:
printk(KERN_CONT " - disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
}