本文整理汇总了C++中PVMCPU类的典型用法代码示例。如果您正苦于以下问题:C++ PVMCPU类的具体用法?C++ PVMCPU怎么用?C++ PVMCPU使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PVMCPU类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: VMM_INT_DECL
/**
 * Process the critical sections (both types) queued for ring-3 'leave'.
 *
 * Drains three per-VCPU queues in order: shared R/W leaves, exclusive R/W
 * leaves, then normal critical-section leaves, and finally clears the
 * VMCPU_FF_PDM_CRITSECT force-action flag.
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) PDMCritSectBothFF(PVMCPU pVCpu)
{
    uint32_t i;
    /* The FF should only be set when at least one queue is non-empty. */
    Assert(   pVCpu->pdm.s.cQueuedCritSectLeaves > 0
           || pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves > 0
           || pVCpu->pdm.s.cQueuedCritSectRwExclLeaves > 0);

    /* Shared leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves;
    pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves = 0; /* reset the count first, then drain what we snapshotted */
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i];
# else
        /* Queued pointers are ring-3 addresses; translate to the current context (R0/RC). */
        PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                                   pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i]);
# endif
        pdmCritSectRwLeaveSharedQueued(pCritSectRw);
        LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
    }

    /* Exclusive leaves -- the last of the two R/W queues. */
    i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves;
    pVCpu->pdm.s.cQueuedCritSectRwExclLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i];
# else
        PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                                   pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i]);
# endif
        pdmCritSectRwLeaveExclQueued(pCritSectRw);
        LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
    }

    /* Normal leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectLeaves;
    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectLeaves[i]);
# endif
        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
示例2: DECLINLINE
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The (possibly virtualized) CPU TSC value.
 * @param   pVCpu           The VMCPU to operate on.
 * @param   fCheckTimers    Whether to check timers when reading the raw
 *                          virtual clock.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;
    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.fTSCVirtualized)
        {
            if (pVM->tm.s.fTSCUseRealTSC)
                u64 = ASMReadTSC();
            else
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            u64 -= pVCpu->tm.s.offTSCRawSrc;
        }
        else
            u64 = ASMReadTSC();

        /* Never return a value lower than what the guest has already seen.
           Bug fix: the original never advanced u64TSCLastSeen on the fast
           path, so the monotonicity guarantee did not hold across successive
           reads; record the value we are about to return (matches the newer
           revision of this function further down in this file's upstream). */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC; /* TSC halted: return the frozen value. */
    return u64;
}
示例3: VMM_INT_DECL
/**
 * MSR read handler for KVM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   idMsr   The MSR being read.
 * @param   pRange  The range this MSR belongs to (unused).
 * @param   puValue Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    NOREF(pRange);
    PVM        pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVM    pKvm    = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    /* Per-VCPU system-time MSR (both new and legacy encodings). */
    if (   idMsr == MSR_GIM_KVM_SYSTEM_TIME
        || idMsr == MSR_GIM_KVM_SYSTEM_TIME_OLD)
    {
        *puValue = pKvmCpu->u64SystemTimeMsr;
        return VINF_SUCCESS;
    }

    /* VM-wide wall-clock MSR (both new and legacy encodings). */
    if (   idMsr == MSR_GIM_KVM_WALL_CLOCK
        || idMsr == MSR_GIM_KVM_WALL_CLOCK_OLD)
    {
        *puValue = pKvm->u64WallClockMsr;
        return VINF_SUCCESS;
    }

    /* Anything else: log (rate limited in ring-3) and raise #GP(0). */
#ifdef IN_RING3
    static uint32_t s_cTimes = 0;
    if (s_cTimes++ < 20)
        LogRel(("GIM: KVM: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
#endif
    LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
    return VERR_CPUM_RAISE_GP_0;
}
示例4: DECLINLINE
/**
 * Read the current CPU timestamp counter.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu        The cross context virtual CPU structure.
 * @param   fCheckTimers Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    /* If the TSC is halted, hand back the frozen value. */
    if (RT_UNLIKELY(!pVCpu->tm.s.fTSCTicking))
        return pVCpu->tm.s.u64TSC;

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    uint64_t uTick = pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
                   ? SUPReadTsc()
                   : tmCpuTickGetRawVirtual(pVM, fCheckTimers);
    uTick -= pVCpu->tm.s.offTSCRawSrc;

    /* Always return a value higher than what the guest has already seen. */
    if (RT_LIKELY(uTick > pVCpu->tm.s.u64TSCLastSeen))
        pVCpu->tm.s.u64TSCLastSeen = uTick;
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
        pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */
        uTick = pVCpu->tm.s.u64TSCLastSeen;
    }
    return uTick;
}
示例5: VMM_INT_DECL
/**
 * Disassembles the instruction at RIP and if it's a hypercall
 * instruction, performs the hypercall.
 *
 * @param   pVCpu    The cross context virtual CPU structure.
 * @param   pCtx     Pointer to the guest-CPU context.
 * @param   pcbInstr Where to store the disassembled instruction length.
 *                   Optional, can be NULL.
 *
 * @todo This interface should disappear when IEM/REM execution engines
 *       handle VMCALL/VMMCALL instructions to call into GIM when
 *       required. See @bugref{7270#c168}.
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMExecHypercallInstr(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t *pcbInstr)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT(pVCpu);

    if (RT_UNLIKELY(!GIMIsEnabled(pVM)))
        return VERR_GIM_NOT_ENABLED;

    /* Disassemble the current instruction; bail out on failure. */
    DISCPUSTATE Dis;
    unsigned    cbInstr;
    int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
    if (!RT_SUCCESS(rc))
    {
        Log(("GIM: GIMExecHypercallInstr: Failed to disassemble CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
        return rc;
    }

    if (pcbInstr)
        *pcbInstr = (uint8_t)cbInstr;

    /* Dispatch to the active GIM provider. */
    switch (pVM->gim.s.enmProviderId)
    {
        case GIMPROVIDERID_HYPERV:
            return gimHvExecHypercallInstr(pVCpu, pCtx, &Dis);
        case GIMPROVIDERID_KVM:
            return gimKvmExecHypercallInstr(pVCpu, pCtx, &Dis);
        default:
            AssertMsgFailed(("GIMExecHypercallInstr: for provider %u not available/implemented\n", pVM->gim.s.enmProviderId));
            return VERR_GIM_HYPERCALLS_NOT_AVAILABLE;
    }
}
示例6: VMM_INT_DECL
/**
 * Notifies VMM that paravirtualized hypercalls are now disabled.
 *
 * @param pVCpu Pointer to the VMCPU.
 */
VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu)
{
#ifndef IN_RC
    /* Ring-3/Ring-0: forward to HM when hardware virtualization is active. */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (HMIsEnabled(pVM))
        HMHypercallsDisable(pVCpu);
#endif
    /* Raw-mode context: nothing to do at present. */
}
示例7: VMM_INT_DECL
/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVCpu           The current CPU.
 * @param   pfOffsettedTsc  Where to return whether the raw (offsetted) host
 *                          TSC can be used for the guest (out).
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 * @thread  EMT(pVCpu).
 * @remarks Superset of TMCpuTickCanUseRealTSC.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    uint64_t cTicksToDeadline;
    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.fMaybeUseOffsettedHostTSC
        && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
        && (   pVM->tm.s.fTSCUseRealTSC
            || (   !pVM->tm.s.fVirtualSyncCatchUp
                && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                && !pVM->tm.s.fVirtualWarpDrive)))
    {
        *pfOffsettedTsc = true;
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);
            uint64_t cNsToDeadline;
            uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
            /* NOTE(review): comparing the current timestamp against the clock
               frequency looks suspicious -- presumably the intent was to skip
               the conversion when cTSCTicksPerSecond == TMCLOCK_FREQ_VIRTUAL;
               verify against upstream before changing. */
            uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                            ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                            : u64NowVirtSync;
            u64Now -= pVCpu->tm.s.offTSCRawSrc;
            *poffRealTSC = u64Now - ASMReadTSC(); /* offset the guest TSC sees relative to the host TSC */
            cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
        }
        else
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
        }
    }
    else
    {
        /* Offsetting refused: record why (stats builds) and report no offset. */
#ifdef VBOX_WITH_STATISTICS
        tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
        *pfOffsettedTsc = false;
        *poffRealTSC = 0;
        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    }
    return cTicksToDeadline;
}
示例8: VMM_INT_DECL
/**
 * Handles the Hyper-V hypercall.
 *
 * @returns VBox status code.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest-CPU context (currently unused).
 */
VMM_INT_DECL(int) gimHvHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (MSR_GIM_HV_HYPERCALL_IS_ENABLED(pVM->gim.s.u.Hv.u64HypercallMsr))
        return VERR_GIM_IPE_3; /** @todo Handle hypercalls. Fail for now */
    return VERR_GIM_HYPERCALLS_NOT_ENABLED;
}
示例9: VMMDECL
/**
 * Set the TPR (task priority register?).
 *
 * @returns VBox status code: VINF_SUCCESS, or VERR_PDM_NO_APIC_INSTANCE when
 *          no APIC device is registered.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   u8TPR   The new TPR.
 */
VMMDECL(int) PDMApicSetTPR(PVMCPU pVCpu, uint8_t u8TPR)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (!pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
        return VERR_PDM_NO_APIC_INSTANCE;

    Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnSetTPR));
    /* Writes go through the APIC callback under the PDM lock. */
    pdmLock(pVM);
    pVM->pdm.s.Apic.CTX_SUFF(pfnSetTPR)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, u8TPR);
    pdmUnlock(pVM);
    return VINF_SUCCESS;
}
示例10: VMMDECL
/**
 * Checks the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       The virtual CPU handle.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
#ifdef IN_RING3
    /* Ring-3: delegate to the generic RT critical section ownership check. */
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
    {
        /* Nested entry always counts as owned; otherwise ownership lapses
           once an unlock is pending. */
        if (pCritSect->s.Core.cNestings > 1)
            return true;
        return !(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    }
    return false;
#endif
}
示例11: VMMRZ_INT_DECL
/**
 * Prepares the host FPU/SSE/AVX stuff for IEM action.
 *
 * This will make sure the FPU/SSE/AVX guest state is _not_ loaded in the CPU.
 * This will make sure the FPU/SSE/AVX host state is saved.
 * Finally, it will make sure the FPU/SSE/AVX host features can be safely
 * accessed.
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPU pVCpu)
{
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_FPU_REM;
    switch (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST))
    {
        /* Neither guest nor host FPU state loaded: save the host state now. */
        case 0:
#ifdef IN_RC
            cpumRZSaveHostFPUState(&pVCpu->cpum.s);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_CPUM); /* Must recalc CR0 before executing more code! */
#else
            if (cpumRZSaveHostFPUState(&pVCpu->cpum.s) == VINF_CPUM_HOST_CR0_MODIFIED)
                HMR0NotifyCpumModifiedHostCr0(pVCpu);
#endif
            Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #0 - %#x\n", ASMGetCR0()));
            break;

        /* Host state already saved; only pending-sync bookkeeping may remain. */
        case CPUM_USED_FPU_HOST:
#ifdef IN_RC
            VMCPU_FF_SET(pVCpu, VMCPU_FF_CPUM); /* (should be set already) */
#elif defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
            /* Bug fix: was 'fUseFlags | CPUM_SYNC_FPU_STATE', which is always
               non-zero, so the flag was cleared and HM notified even when the
               sync flag was not set.  Test the bit with '&'. */
            if (pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE)
            {
                pVCpu->cpum.s.fUseFlags &= ~CPUM_SYNC_FPU_STATE;
                HMR0NotifyCpumUnloadedGuestFpuState(pVCpu);
            }
#endif
            Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #1 - %#x\n", ASMGetCR0()));
            break;

        /* Guest state loaded on top of saved host state: unload the guest state. */
        case CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST:
#if defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
            if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
                HMR0SaveFPUState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
            else
#endif
                cpumRZSaveGuestFpuState(&pVCpu->cpum.s, true /*fLeaveFpuAccessible*/);
#ifdef IN_RING0
            HMR0NotifyCpumUnloadedGuestFpuState(pVCpu);
#else
            VMCPU_FF_SET(pVCpu, VMCPU_FF_CPUM); /* Must recalc CR0 before executing more code! */
#endif
            Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #2 - %#x\n", ASMGetCR0()));
            break;

        default:
            AssertFailed(); /* CPUM_USED_FPU_GUEST alone is an invalid combination. */
    }
}
示例12: VMM_INT_DECL
/**
 * Check if the APIC has a pending interrupt/if a TPR change would active one.
 *
 * @returns VINF_SUCCESS or VERR_PDM_NO_APIC_INSTANCE.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pfPending   Pending state (out).
 */
VMM_INT_DECL(int) PDMApicHasPendingIrq(PVMCPU pVCpu, bool *pfPending)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        /* Bug fix: the original asserted pfnSetTPR (copy/paste from
           PDMApicSetTPR); the callback actually invoked below is
           pfnHasPendingIrq. */
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq));
        pdmLock(pVM);
        *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu,
                                                                NULL /* pu8PendingIrq */);
        pdmUnlock(pVM);
        return VINF_SUCCESS;
    }
    return VERR_PDM_NO_APIC_INSTANCE;
}
示例13: VMM_INT_DECL
/**
 * Returns whether the guest has configured and enabled calls to the hypervisor.
 *
 * @returns true if hypercalls are enabled and usable, false otherwise.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMM_INT_DECL(bool) GIMAreHypercallsEnabled(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (!GIMIsEnabled(pVM))
        return false;

    /* Only the Hyper-V provider exposes a hypercall-enabled query here. */
    if (pVM->gim.s.enmProviderId == GIMPROVIDERID_HYPERV)
        return GIMHvAreHypercallsEnabled(pVCpu);
    return false;
}
示例14: VMMDECL
/**
 * Get the TPR (task priority register).
 *
 * @returns VBox status code: VINF_SUCCESS on success,
 *          VERR_PDM_NO_APIC_INSTANCE when no APIC device is registered
 *          (in which case *pu8TPR is set to 0).
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pu8TPR      Where to store the TPR.
 * @param   pfPending   Pending interrupt state (out, optional -- may be NULL).
 */
VMMDECL(int) PDMApicGetTPR(PVMCPU pVCpu, uint8_t *pu8TPR, bool *pfPending)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR));
        /* We don't acquire the PDM lock here as we're just reading information. Doing so causes massive
         * contention as this function is called very often by each and every VCPU.
         */
        *pu8TPR = pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu);
        if (pfPending)
            /* NOTE(review): this pfnHasPendingIrq call passes only pDevIns, while
               PDMApicHasPendingIrq elsewhere in this file passes idCpu and a
               pu8PendingIrq pointer as well -- presumably different API versions;
               verify against the PDMAPICREG callback declaration in use. */
            *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns));
        return VINF_SUCCESS;
    }
    *pu8TPR = 0;
    return VERR_PDM_NO_APIC_INSTANCE;
}
示例15: VMMRZ_INT_DECL
/**
 * Makes sure the YMM0..YMM15 and MXCSR state in CPUMCPU::Guest is up to date.
 *
 * This will not cause CPUM_USED_FPU_GUEST to change.
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPU pVCpu)
{
    /* Nothing to do unless the guest FPU state is currently loaded on the CPU. */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
    {
#if defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        /* 32-bit ring-0 hosting a 64-bit guest: the state must be saved via HM. */
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        {
            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
            HMR0SaveFPUState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
            /* NOTE(review): this |= is redundant -- the flag is already set,
               as established by the guarding 'if' above. */
            pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU_GUEST;
        }
        else
#endif
            cpumRZSaveGuestAvxRegisters(&pVCpu->cpum.s);
        Log7(("CPUMRZFpuStateActualizeAvxForRead\n"));
    }
}