This article collects typical usage examples of the C function pfn_to_mfn. If you are wondering what pfn_to_mfn does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help.
The following shows 14 code examples of pfn_to_mfn, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better C code examples.
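All of the examples share one pattern: a guest pseudo-physical frame number (PFN) is converted to a machine frame number (MFN) with pfn_to_mfn() before being handed to the hypervisor, whether for grant-table setup, mmuext operations, memory reservations, or GDT loading. As a quick orientation, here is a minimal sketch of that pattern for granting one page to a backend domain. It is illustrative only: it assumes a paravirtualized (non-auto-translated) Xen guest and the Linux Xen headers used in the examples below, and grant_page_to_backend() is a hypothetical helper name, not an existing kernel API.

/* Minimal sketch: convert a page's PFN to an MFN and grant it to a backend. */
#include <linux/mm.h>
#include <linux/errno.h>
#include <xen/page.h>            /* pfn_to_mfn() for x86 PV guests */
#include <xen/grant_table.h>     /* gnttab_grant_foreign_access() */

/* Hypothetical helper, not part of any kernel API. */
static int grant_page_to_backend(struct page *page, domid_t backend_id,
                                 grant_ref_t *gref)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long mfn = pfn_to_mfn(pfn);  /* the hypervisor wants machine frames */
        int ref;

        /* 0 == read-write grant; pass 1 for read-only. */
        ref = gnttab_grant_foreign_access(backend_id, mfn, 0);
        if (ref < 0)
                return ref;      /* out of grant references */

        *gref = ref;
        return 0;
}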
Example 1: xen_load_gdt
static void xen_load_gdt(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
unsigned long frames[pages];
int f;
/*
* A GDT can be up to 64k in size, which corresponds to 8192
* 8-byte entries, or 16 4k pages..
*/
BUG_ON(size > 65536);
BUG_ON(va & ~PAGE_MASK);
for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
int level;
pte_t *ptep;
unsigned long pfn, mfn;
void *virt;
/*
* The GDT is per-cpu and is in the percpu data area.
* That can be virtually mapped, so we need to do a
* page-walk to get the underlying MFN for the
* hypercall. The page can also be in the kernel's
* linear range, so we need to RO that mapping too.
*/
ptep = lookup_address(va, &level);
BUG_ON(ptep == NULL);
pfn = pte_pfn(*ptep);
mfn = pfn_to_mfn(pfn);
virt = __va(PFN_PHYS(pfn));
frames[f] = mfn;
make_lowmem_page_readonly((void *)va);
make_lowmem_page_readonly(virt);
}
if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
BUG();
}
Example 2: clear_highpage
void clear_highpage(struct page *page)
{
void *kaddr;
if (likely(xen_feature(XENFEAT_highmem_assist))
&& PageHighMem(page)) {
struct mmuext_op meo;
meo.cmd = MMUEXT_CLEAR_PAGE;
meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
return;
}
kaddr = kmap_atomic(page, KM_USER0);
clear_page(kaddr);
kunmap_atomic(kaddr, KM_USER0);
}
Example 3: set_foreign_p2m_mapping
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
pte_t *pte;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
if (kmap_ops) {
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
kmap_ops, count);
if (ret)
goto out;
}
for (i = 0; i < count; i++) {
unsigned long mfn, pfn;
/* Do not add to override if the map failed. */
if (map_ops[i].status)
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
pfn = page_to_pfn(pages[i]);
WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
ret = -ENOMEM;
goto out;
}
}
out:
return ret;
}
Example 4: xen_pgd_unpin
/* Release a pagetables pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
struct mmuext_op *op;
struct multicall_space mcs;
xen_mc_batch();
mcs = __xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_UNPIN_TABLE;
op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
pgd_walk(pgd, unpin_page, TASK_SIZE);
xen_mc_issue(0);
}
Example 5: kexec_allocate
int kexec_allocate(struct xc_dom_image *dom, xen_vaddr_t up_to)
{
unsigned long new_allocated = (up_to - dom->parms.virt_base) / PAGE_SIZE;
unsigned long i;
pages = realloc(pages, new_allocated * sizeof(*pages));
pages_mfns = realloc(pages_mfns, new_allocated * sizeof(*pages_mfns));
pages_moved2pfns = realloc(pages_moved2pfns, new_allocated * sizeof(*pages_moved2pfns));
for (i = allocated; i < new_allocated; i++) {
/* Exchange old page of PFN i with a newly allocated page. */
xen_pfn_t old_mfn = dom->p2m_host[i];
xen_pfn_t new_pfn;
xen_pfn_t new_mfn;
pages[i] = alloc_page();
memset((void*) pages[i], 0, PAGE_SIZE);
new_pfn = PHYS_PFN(to_phys(pages[i]));
pages_mfns[i] = new_mfn = pfn_to_mfn(new_pfn);
/*
* If the PFN of the newly allocated page (new_pfn) is less than the
* currently requested PFN (i), look up the relevant PFN/MFN pair. In
* that case dom->p2m_host[new_pfn] no longer contains the proper MFN,
* because the original page with new_pfn was moved earlier to a
* different location.
*/
for (; new_pfn < i; new_pfn = pages_moved2pfns[new_pfn]);
/* Store destination PFN of currently requested page. */
pages_moved2pfns[i] = new_pfn;
/* Put old page at new PFN */
dom->p2m_host[new_pfn] = old_mfn;
/* Put new page at PFN i */
dom->p2m_host[i] = new_mfn;
}
allocated = new_allocated;
return 0;
}
Example 6: build_pagetable
void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
unsigned long start_address, end_address;
unsigned long pfn_to_map, pt_pfn = *start_pfn;
static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
unsigned long offset;
int count = 0;
pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
{
printk("WARNING: Mini-OS trying to use Xen virtual space. "
"Truncating memory from %dMB to ",
((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
*max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
printk("%dMB\n",
((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
}
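//......... (rest of the code omitted here) .........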
Example 7: xen_add_extra_mem
static void __init xen_add_extra_mem(u64 start, u64 size)
{
unsigned long pfn;
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
/* Add new region. */
if (xen_extra_mem[i].size == 0) {
xen_extra_mem[i].start = start;
xen_extra_mem[i].size = size;
break;
}
/* Append to existing region. */
if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
xen_extra_mem[i].size += size;
break;
}
}
if (i == XEN_EXTRA_MEM_MAX_REGIONS)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
memblock_reserve(start, size);
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
xen_max_p2m_pfn = PFN_DOWN(start + size);
for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
continue;
WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
pfn, mfn);
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
}
Example 8: xen_release_chunk
static unsigned long __init xen_release_chunk(unsigned long start,
unsigned long end)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
.domid = DOMID_SELF
};
unsigned long len = 0;
unsigned long pfn;
int ret;
for(pfn = start; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
/* Make sure pfn exists to start with */
if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
continue;
set_xen_guest_handle(reservation.extent_start, &mfn);
reservation.nr_extents = 1;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
len++;
}
}
printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
start, end, len);
return len;
}
static unsigned long __init xen_set_identity_and_release(
const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
phys_addr_t start = 0;
unsigned long released = 0;
unsigned long identity = 0;
const struct e820entry *entry;
int i;
/*
* Combine non-RAM regions and gaps until a RAM region (or the
* end of the map) is reached, then set the 1:1 map and
* release the pages (if available) in those non-RAM regions.
*
* The combined non-RAM regions are rounded to a whole number
* of pages so any partial pages are accessible via the 1:1
* mapping. This is needed for some BIOSes that put (for
* example) the DMI tables in a reserved region that begins on
* a non-page boundary.
*/
for (i = 0, entry = list; i < map_size; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
if (entry->type == E820_RAM || i == map_size - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
if (entry->type == E820_RAM)
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) {
if (start_pfn < nr_pages)
released += xen_release_chunk(
start_pfn, min(end_pfn, nr_pages));
identity += set_phys_range_identity(
start_pfn, end_pfn);
}
start = end;
}
}
printk(KERN_INFO "Released %lu pages of unused memory\n", released);
printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
return released;
}
Example 9: free_pmd_page
void free_pmd_page(unsigned long addr)
{
struct ptrpmd *newstruct = NULL;
struct ptrpmd *temp_head = NULL;
int i = 0;
int counter = 0;
newstruct = (struct ptrpmd *)kmalloc(sizeof(struct ptrpmd), GFP_KERNEL);
newstruct -> content = addr;
spin_lock(&pmd_cache_lock);
newstruct -> next = pmd_head;
pmd_head = newstruct;
temp_head = pmd_head;
/*free node */
if(pmd_used_counter)
pmd_used_counter--;
pmd_free_counter++;
if(pmd_used_counter)
{
//if((pmd_free_counter/pmd_used_counter>=3) && ((pmd_used_counter + pmd_free_counter) >= 1800))
//if((pmd_used_counter/pmd_free_counter < 8) && ((pmd_used_counter + pmd_free_counter) >= 600))
//if((pmd_used_counter/pmd_free_counter < 1) && (pmd_used_counter >= 42))
//if((pmd_free_counter/pmd_used_counter >= 4) && (pmd_used_counter >= 80))
//if((pmd_free_counter/pmd_used_counter >= 6) && ((pgd_used_counter + pgd_free_counter) >= 230))
//if((pmd_used_counter/pmd_free_counter < 2) && ((pgd_used_counter + pgd_free_counter) >= 80))
if((pmd_free_counter/pmd_used_counter > 1) && ((pmd_used_counter + pmd_free_counter) >= 40))
//if((pmd_free_counter/pmd_used_counter >= 5) && ((pmd_used_counter + pmd_free_counter) >= 200))
{
//counter = pmd_free_counter * 3 / 10;
counter = 0;
for(i=0;i<counter;i++)
{
pmd_head = pmd_head->next;
}
pmd_free_counter -= counter;
}
}
spin_unlock(&pmd_cache_lock);
if(counter != 0)
{
struct ptrpmd * newstructarray = NULL;
struct ptrpmd * newstructarray_head = NULL;
int rc = 1;
newstructarray = (struct ptrpmd *)kmalloc(sizeof(struct ptrpmd) * counter, GFP_KERNEL);
newstructarray_head = newstructarray;
for (i=0;i<counter;i++)
{
newstruct = temp_head;
temp_head = temp_head->next;
newstructarray[i].content = pfn_to_mfn(PFN_DOWN(__pa(newstruct->content)));
kfree(newstruct);
}
//hypercall newstructarray
rc = HYPERVISOR_pmd_op(newstructarray, counter);
//if (rc == 0)
//printk("pmd cache free success\n");
//else
//printk("pmd cache free error\n");
//free page to the buddy system
newstructarray = newstructarray_head;
for(i=0;i<counter;i++)
{
free_page(newstructarray[i].content);
}
//free newstructarray
kfree(newstructarray);
}
return;
}
Example 10: map_data_for_request
static int map_data_for_request(struct vscsifrnt_info *info,
struct scsi_cmnd *sc,
struct vscsiif_request *ring_req,
struct vscsifrnt_shadow *shadow)
{
grant_ref_t gref_head;
struct page *page;
int err, ref, ref_cnt = 0;
int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
unsigned int i, off, len, bytes;
unsigned int data_len = scsi_bufflen(sc);
unsigned int data_grants = 0, seg_grants = 0;
struct scatterlist *sg;
unsigned long mfn;
struct scsiif_request_segment *seg;
ring_req->nr_segments = 0;
if (sc->sc_data_direction == DMA_NONE || !data_len)
return 0;
scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
data_grants += PFN_UP(sg->offset + sg->length);
if (data_grants > VSCSIIF_SG_TABLESIZE) {
if (data_grants > info->host->sg_tablesize) {
shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
"Unable to map request_buffer for command!\n");
return -E2BIG;
}
seg_grants = vscsiif_grants_sg(data_grants);
shadow->sg = kcalloc(data_grants,
sizeof(struct scsiif_request_segment), GFP_ATOMIC);
if (!shadow->sg)
return -ENOMEM;
}
seg = shadow->sg ? : ring_req->seg;
err = gnttab_alloc_grant_references(seg_grants + data_grants,
&gref_head);
if (err) {
kfree(shadow->sg);
shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
"gnttab_alloc_grant_references() error\n");
return -ENOMEM;
}
if (seg_grants) {
page = virt_to_page(seg);
off = (unsigned long)seg & ~PAGE_MASK;
len = sizeof(struct scsiif_request_segment) * data_grants;
while (len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
info->dev->otherend_id, mfn, 1);
shadow->gref[ref_cnt] = ref;
ring_req->seg[ref_cnt].gref = ref;
ring_req->seg[ref_cnt].offset = (uint16_t)off;
ring_req->seg[ref_cnt].length = (uint16_t)bytes;
page++;
len -= bytes;
off = 0;
ref_cnt++;
}
BUG_ON(seg_grants < ref_cnt);
seg_grants = ref_cnt;
}
scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
while (len > 0 && data_len > 0) {
/*
* sg sends a scatterlist that is larger than
* the data_len it wants transferred for certain
* IO sizes.
*/
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
info->dev->otherend_id, mfn, grant_ro);
shadow->gref[ref_cnt] = ref;
seg->gref = ref;
seg->offset = (uint16_t)off;
seg->length = (uint16_t)bytes;
page++;
//......... (rest of the code omitted here) .........
Example 11: xen_do_chunk
static unsigned long __init xen_do_chunk(unsigned long start,
unsigned long end, bool release)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
.domid = DOMID_SELF
};
unsigned long len = 0;
int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
unsigned long pfn;
int ret;
for (pfn = start; pfn < end; pfn++) {
unsigned long frame;
unsigned long mfn = pfn_to_mfn(pfn);
if (release) {
/* Make sure pfn exists to start with */
if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
continue;
frame = mfn;
} else {
if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
continue;
frame = pfn;
}
set_xen_guest_handle(reservation.extent_start, &frame);
reservation.nr_extents = 1;
ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
&reservation);
WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
release ? "release" : "populate", pfn, ret);
if (ret == 1) {
if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
if (release)
break;
set_xen_guest_handle(reservation.extent_start, &frame);
reservation.nr_extents = 1;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
break;
}
len++;
} else
break;
}
if (len)
printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
release ? "Freeing" : "Populating",
start, end, len,
release ? "freed" : "added");
return len;
}
static unsigned long __init xen_release_chunk(unsigned long start,
unsigned long end)
{
/*
* Xen already ballooned out the E820 non RAM regions for us
* and set them up properly in EPT.
*/
if (xen_feature(XENFEAT_auto_translated_physmap))
return end - start;
return xen_do_chunk(start, end, true);
}
static unsigned long __init xen_populate_chunk(
const struct e820entry *list, size_t map_size,
unsigned long max_pfn, unsigned long *last_pfn,
unsigned long credits_left)
{
const struct e820entry *entry;
unsigned int i;
unsigned long done = 0;
unsigned long dest_pfn;
for (i = 0, entry = list; i < map_size; i++, entry++) {
unsigned long s_pfn;
unsigned long e_pfn;
unsigned long pfns;
long capacity;
if (credits_left <= 0)
break;
if (entry->type != E820_RAM)
continue;
e_pfn = PFN_DOWN(entry->addr + entry->size);
/* We only care about E820 after the xen_start_info->nr_pages */
if (e_pfn <= max_pfn)
continue;
s_pfn = PFN_UP(entry->addr);
//......... (rest of the code omitted here) .........
Example 12: xen_suspend_domain
/*
* Top level routine to direct suspend/resume of a domain.
*/
void
xen_suspend_domain(void)
{
extern void rtcsync(void);
extern hrtime_t hres_last_tick;
mfn_t start_info_mfn;
ulong_t flags;
pfn_t pfn;
int i;
/*
* Check that we are happy to suspend on this hypervisor.
*/
if (xen_hypervisor_supports_solaris(XEN_SUSPEND_CHECK) == 0) {
cpr_err(CE_WARN, "Cannot suspend on this hypervisor "
"version: v%lu.%lu%s, need at least version v3.0.4 or "
"-xvm based hypervisor", XENVER_CURRENT(xv_major),
XENVER_CURRENT(xv_minor), XENVER_CURRENT(xv_ver));
return;
}
/*
* XXPV - Are we definitely OK to suspend by the time we've connected
* the handler?
*/
cpr_err(CE_NOTE, "Domain suspending for save/migrate");
SUSPEND_DEBUG("xen_suspend_domain\n");
/*
* suspend interrupts and devices
* XXPV - we use suspend/resume for both save/restore domains (like sun
* cpr) and for migration. Would be nice to know the difference if
* possible. For save/restore where down time may be a long time, we
* may want to do more of the things that cpr does. (i.e. notify user
* processes, shrink memory footprint for faster restore, etc.)
*/
xen_suspend_devices();
SUSPEND_DEBUG("xenbus_suspend\n");
xenbus_suspend();
pfn = hat_getpfnum(kas.a_hat, (caddr_t)xen_info);
start_info_mfn = pfn_to_mfn(pfn);
/*
* XXPV: cpu hotplug can hold this under a xenbus watch. Are we safe
* wrt xenbus being suspended here?
*/
mutex_enter(&cpu_lock);
/*
* Suspend must be done on vcpu 0, as no context for other CPUs is
* saved.
*
* XXPV - add to taskq API ?
*/
thread_affinity_set(curthread, 0);
kpreempt_disable();
SUSPEND_DEBUG("xen_start_migrate\n");
xen_start_migrate();
if (ncpus > 1)
suspend_cpus();
/*
* We can grab the ec_lock as it's a spinlock with a high SPL. Hence
* any holder would have dropped it to get through suspend_cpus().
*/
mutex_enter(&ec_lock);
/*
* From here on in, we can't take locks.
*/
SUSPEND_DEBUG("ec_suspend\n");
ec_suspend();
SUSPEND_DEBUG("gnttab_suspend\n");
gnttab_suspend();
flags = intr_clear();
xpv_time_suspend();
/*
* Currently, the hypervisor incorrectly fails to bring back
* powered-down VCPUs. Thus we need to record any powered-down VCPUs
* to prevent any attempts to operate on them. But we have to do this
* *after* the very first time we do ec_suspend().
*/
for (i = 1; i < ncpus; i++) {
if (cpu[i] == NULL)
continue;
if (cpu_get_state(cpu[i]) == P_POWEROFF)
CPUSET_ATOMIC_ADD(cpu_suspend_lost_set, i);
}
//......... (rest of the code omitted here) .........
Example 13: xennet_alloc_rx_buffers
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
struct page *page;
int i, batch_target, notify;
RING_IDX req_prod = np->rx.req_prod_pvt;
grant_ref_t ref;
unsigned long pfn;
void *vaddr;
struct xen_netif_rx_request *req;
if (unlikely(!netif_carrier_ok(dev)))
return;
/*
* Allocate skbuffs greedily, even though we batch updates to the
* receive ring. This creates a less bursty demand on the memory
* allocator, so should reduce the chance of failed allocation requests
* both for ourself and for other kernel subsystems.
*/
batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page) {
kfree_skb(skb);
no_skb:
/* Any skbuffs queued for refill? Force them out. */
if (i != 0)
goto refill;
/* Could not allocate any skbuffs. Try again later. */
mod_timer(&np->rx_refill_timer,
jiffies + (HZ/10));
break;
}
skb_shinfo(skb)->frags[0].page = page;
skb_shinfo(skb)->nr_frags = 1;
__skb_queue_tail(&np->rx_batch, skb);
}
/* Is the batch large enough to be worthwhile? */
if (i < (np->rx_target/2)) {
if (req_prod > np->rx.sring->req_prod)
goto push;
return;
}
/* Adjust our fill target if we risked running out of buffers. */
if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
((np->rx_target *= 2) > np->rx_max_target))
np->rx_target = np->rx_max_target;
refill:
for (i = 0; ; i++) {
skb = __skb_dequeue(&np->rx_batch);
if (skb == NULL)
break;
skb->dev = dev;
id = xennet_rxidx(req_prod + i);
BUG_ON(np->rx_skbs[id]);
np->rx_skbs[id] = skb;
ref = gnttab_claim_grant_reference(&np->gref_rx_head);
BUG_ON((signed short)ref < 0);
np->grant_rx_ref[id] = ref;
pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
vaddr = page_address(skb_shinfo(skb)->frags[0].page);
req = RING_GET_REQUEST(&np->rx, req_prod + i);
gnttab_grant_foreign_access_ref(ref,
np->xbdev->otherend_id,
pfn_to_mfn(pfn),
0);
req->id = id;
req->gref = ref;
}
wmb(); /* barrier so the backend sees requests */
np->rx.req_prod_pvt = req_prod + i;
push:
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
if (notify)
notify_remote_via_irq(np->netdev->irq);
}
Example 14: kexec
void kexec(void *kernel, long kernel_size, void *module, long module_size, char *cmdline, unsigned long flags)
{
struct xc_dom_image *dom;
int rc;
domid_t domid = DOMID_SELF;
xen_pfn_t pfn;
xc_interface *xc_handle;
unsigned long i;
void *seg;
xen_pfn_t boot_page_mfn = virt_to_mfn(&_boot_page);
char features[] = "";
struct mmu_update *m2p_updates;
unsigned long nr_m2p_updates;
DEBUG("booting with cmdline %s\n", cmdline);
xc_handle = xc_interface_open(0,0,0);
dom = xc_dom_allocate(xc_handle, cmdline, features);
dom->allocate = kexec_allocate;
/* We are using guest owned memory, therefore no limits. */
xc_dom_kernel_max_size(dom, 0);
xc_dom_ramdisk_max_size(dom, 0);
dom->kernel_blob = kernel;
dom->kernel_size = kernel_size;
dom->ramdisk_blob = module;
dom->ramdisk_size = module_size;
dom->flags = flags;
dom->console_evtchn = start_info.console.domU.evtchn;
dom->xenstore_evtchn = start_info.store_evtchn;
tpm_hash2pcr(dom, cmdline);
if ( (rc = xc_dom_boot_xen_init(dom, xc_handle, domid)) != 0 ) {
grub_printf("xc_dom_boot_xen_init returned %d\n", rc);
errnum = ERR_BOOT_FAILURE;
goto out;
}
if ( (rc = xc_dom_parse_image(dom)) != 0 ) {
grub_printf("xc_dom_parse_image returned %d\n", rc);
errnum = ERR_BOOT_FAILURE;
goto out;
}
#ifdef __i386__
if (strcmp(dom->guest_type, "xen-3.0-x86_32p")) {
grub_printf("can only boot x86 32 PAE kernels, not %s\n", dom->guest_type);
errnum = ERR_EXEC_FORMAT;
goto out;
}
#endif
#ifdef __x86_64__
if (strcmp(dom->guest_type, "xen-3.0-x86_64")) {
grub_printf("can only boot x86 64 kernels, not %s\n", dom->guest_type);
errnum = ERR_EXEC_FORMAT;
goto out;
}
#endif
/* equivalent of xc_dom_mem_init */
dom->arch_hooks = xc_dom_find_arch_hooks(xc_handle, dom->guest_type);
dom->total_pages = start_info.nr_pages;
/* equivalent of arch_setup_meminit */
/* setup initial p2m */
dom->p2m_host = malloc(sizeof(*dom->p2m_host) * dom->total_pages);
/* Start with our current P2M */
for (i = 0; i < dom->total_pages; i++)
dom->p2m_host[i] = pfn_to_mfn(i);
if ( (rc = xc_dom_build_image(dom)) != 0 ) {
grub_printf("xc_dom_build_image returned %d\n", rc);
errnum = ERR_BOOT_FAILURE;
goto out;
}
/* copy hypercall page */
/* TODO: domctl instead, but requires privileges */
if (dom->parms.virt_hypercall != -1) {
pfn = PHYS_PFN(dom->parms.virt_hypercall - dom->parms.virt_base);
memcpy((void *) pages[pfn], hypercall_page, PAGE_SIZE);
}
/* Equivalent of xc_dom_boot_image */
dom->shared_info_mfn = PHYS_PFN(start_info.shared_info);
if (!xc_dom_compat_check(dom)) {
grub_printf("xc_dom_compat_check failed\n");
errnum = ERR_EXEC_FORMAT;
goto out;
}
/* Move current console, xenstore and boot MFNs to the allocated place */
do_exchange(dom, dom->console_pfn, start_info.console.domU.mfn);
do_exchange(dom, dom->xenstore_pfn, start_info.store_mfn);
//......... (rest of the code omitted here) .........