本文整理汇总了C++中STAM_COUNTER_INC函数的典型用法代码示例。如果您正苦于以下问题:C++ STAM_COUNTER_INC函数的具体用法?C++ STAM_COUNTER_INC怎么用?C++ STAM_COUNTER_INC使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了STAM_COUNTER_INC函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: DECLINLINE
/**
 * Internal worker - records the size of an MMIO access in the statistics.
 * Compiles to nothing unless VBOX_WITH_STATISTICS is defined.
 *
 * @param   pVM     The VM handle (counters live under pVM->iom.s).
 * @param   cb      The access size in bytes; only 1, 2, 4 and 8 are valid.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    if (cb == 1)
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
    else if (cb == 2)
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
    else if (cb == 4)
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
    else if (cb == 8)
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
    else
        AssertMsgFailed(("Invalid data length %d\n", cb));
#else
    NOREF(pVM); NOREF(cb);
#endif
}
示例2: emR3HmExecuteIOInstruction
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * First tries to restart an I/O instruction that ring-0 refused to handle;
 * failing that, hands the instruction to the IEM interpreter.
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
PCPUMCTX pCtx = pVCpu->em.s.pCtx;
STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
/*
 * Try to restart the io instruction that was refused in ring-0.
 */
VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
if (IOM_SUCCESS(rcStrict))
{
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
return VBOXSTRICTRC_TODO(rcStrict); /* rip already updated. */
}
/* VERR_NOT_FOUND simply means there was no pending instruction to restart;
   any other status is unexpected, and informational statuses are converted
   to an internal processing error so the caller notices. */
AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));
/*
 * Hand it over to the interpreter.
 */
rcStrict = IEMExecOne(pVCpu);
LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
return VBOXSTRICTRC_TODO(rcStrict);
}
示例3: selmGuestGDTWriteHandler
/* Access handler for guest writes to the guest's own GDT.
   NOTE(review): the return-type/DECLCALLBACK line is missing from this excerpt;
   visible returns are VINF_PGM_HANDLER_DO_DEFAULT (ring-3) or a VBOXSTRICTRC (RC). */
selmGuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
/* This handler is only registered for write access. */
Assert(enmAccessType == PGMACCESSTYPE_WRITE);
NOREF(enmAccessType);
Log(("selmGuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
NOREF(GCPtr);
NOREF(cbBuf);
NOREF(pvPtr);
NOREF(pvBuf);
NOREF(enmOrigin);
NOREF(pvUser);
# ifdef IN_RING3
/* Ring-3: just flag the shadow GDT for resynchronization and let PGM
   perform the write itself. */
VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
return VINF_PGM_HANDLER_DO_DEFAULT;
# else /* IN_RC: */
/*
 * Execute the write, doing necessary pre and post shadow GDT checks.
 */
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
/* NOTE(review): the offset is computed as base - fault address; the usual
   "offset into the GDT" would be GCPtr - pCtx->gdtr.pGdt. Confirm against the
   expectations of selmRCGuestGdtPre/PostWriteCheck. */
uint32_t offGuestGdt = pCtx->gdtr.pGdt - GCPtr;
selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
memcpy(pvBuf, pvPtr, cbBuf);
VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
/* Count whether the write could be handled in place or forced a full GDT resync. */
if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
else
STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
return rcStrict;
# endif
}
示例4: emR3HmExecuteIOInstruction
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM);
    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /* The continue-exit index comes from untrusted context; snapshot it and
       fence before the bounds check. */
    uint32_t idxExitRec = pVCpu->em.s.idxContinueExitRec;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

    VBOXSTRICTRC rcExec;
    if (idxExitRec < RT_ELEMENTS(pVCpu->em.s.aExitRecords))
    {
        /* A recorded exit is pending - continue it via the exit history optimization. */
        RT_UNTRUSTED_VALIDATED_FENCE();
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcExec = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxExitRec], 0);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcExec)));
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
    }
    else
    {
        /*
         * No pending exit record - hand the instruction to the interpreter.
         */
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcExec = IEMExecOne(pVCpu);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcExec)));
    }

    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcExec);
}
示例5: sbappend
/*
* Try and write() to the socket, whatever doesn't get written
* append to the buffer... for a host with a fast net connection,
* this prevents an unnecessary copy of the data
* (the socket is non-blocking, so we won't hang)
*/
void
sbappend(PNATState pData, struct socket *so, struct mbuf *m)
{
int ret = 0;
int mlen = 0;
STAM_PROFILE_START(&pData->StatIOSBAppend_pf, a);
LogFlow(("sbappend: so = %lx, m = %lx, m->m_len = %d\n", (long)so, (long)m, m ? m->m_len : 0));
STAM_COUNTER_INC(&pData->StatIOSBAppend);
/* Shouldn't happen, but... e.g. foreign host closes connection */
mlen = m_length(m, NULL);
if (mlen <= 0)
{
STAM_COUNTER_INC(&pData->StatIOSBAppend_zm);
goto done;
}
/*
* If there is urgent data, call sosendoob
* if not all was sent, sowrite will take care of the rest
* (The rest of this function is just an optimisation)
*/
if (so->so_urgc)
{
sbappendsb(pData, &so->so_rcv, m);
m_freem(pData, m);
sosendoob(so);
return;
}
/*
* We only write if there's nothing in the buffer,
* otherwise it'll arrive out of order, and hence corrupt
*/
if (so->so_rcv.sb_cc == 0)
{
caddr_t buf = NULL;
if (m->m_next)
{
buf = RTMemAlloc(mlen);
if (buf == NULL)
{
ret = 0;
goto no_sent;
}
m_copydata(m, 0, mlen, buf);
}
else
buf = mtod(m, char *);
ret = send(so->s, buf, mlen, 0);
if (m->m_next)
RTMemFree(buf);
}
示例6: patmAddBranchToLookupCache
/**
 * Adds a branch pair to the lookup cache of the particular branch instruction.
 *
 * @returns VBox status code: VINF_SUCCESS on success, VERR_INVALID_PARAMETER
 *          if the jump table is not inside patch memory, VERR_INTERNAL_ERROR
 *          if no free slot was found despite cAddresses < nrSlots.
 * @param   pVM             Pointer to the VM.
 * @param   pJumpTableGC    Pointer to branch instruction lookup cache (GC
 *                          address inside patch memory).
 * @param   pBranchTarget   Original branch target.
 * @param   pRelBranchPatch Relative duplicated function address.
 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;
    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));
    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);
    /* Translate the GC address into something dereferenceable in this context. */
#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE)pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE)(pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        /* There is room: claim the first empty slot. */
        uint32_t i;
        for (i = 0; i < pJumpTable->nrSlots; i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        /* Slot i was just filled, so i + 1 slots are in use.  (The previous
           '< i' test under-counted by one whenever the old maximum was
           exactly i.) */
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i + 1)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Table full: replace an old entry at the round-robin insert position. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0); /* power-of-two slot count assumed by the masking below */
        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;
        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }
    return VINF_SUCCESS;
}
示例7: pdmR3R0CritSectEnterContended
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 * @param   pSrcPos         Lock validator source position (strict builds only).
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
/*
 * Start waiting.
 */
/* If the atomic increment takes cLockers from -1 to 0, the owner released
   the section in the meantime and we get it without waiting. */
if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif
/*
 * The wait loop.
 */
PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
# ifdef PDMCRITSECT_STRICT
RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
if (RT_FAILURE(rc2))
return rc2;
# else
RTTHREAD hThreadSelf = RTThreadSelf();
# endif
# endif
for (;;)
{
/* Tell the lock validator / thread-state tracker we are about to block. */
# ifdef PDMCRITSECT_STRICT
int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
if (RT_FAILURE(rc9))
return rc9;
# elif defined(IN_RING3)
RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif
/* Bail out if the critsect was destroyed while we were waiting. */
if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
return VERR_SEM_DESTROYED;
if (rc == VINF_SUCCESS)
return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
/* VERR_INTERRUPTED just means the wait was interrupted; go around again. */
AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
}
/* won't get here */
}
示例8: DECLINLINE
/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The current CPU.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
    /* Bump exactly one counter describing the refusal reason. */
    if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
        return;
    }
    if (!pVCpu->tm.s.fTSCTicking)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
        return;
    }
    if (pVM->tm.s.fTSCUseRealTSC)
        return; /* real TSC mode - nothing to record */

    if (pVM->tm.s.fVirtualSyncCatchUp)
    {
        /* Bucket by how far behind the virtual sync clock is. */
        uint32_t const uPct = pVM->tm.s.u32VirtualSyncCatchUpPercentage;
        if (uPct <= 10)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
        else if (uPct <= 25)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
        else if (uPct <= 100)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
        else
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
    }
    else if (!pVM->tm.s.fVirtualSyncTicking)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
    else if (pVM->tm.s.fVirtualWarpDrive)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
}
示例9: DECLINLINE
/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
/* Sample the reason for refusing; exactly one counter is incremented. */
if (pVM->tm.s.enmTSCMode != TMTSCMODE_DYNAMIC)
STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
else if (!pVCpu->tm.s.fTSCTicking)
STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
else if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
{
/* Virtual-clock backed TSC: classify by virtual sync clock state. */
if (pVM->tm.s.fVirtualSyncCatchUp)
{
/* Bucket by the catch-up percentage of the virtual sync clock. */
if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
else
STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
}
else if (!pVM->tm.s.fVirtualSyncTicking)
STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
else if (pVM->tm.s.fVirtualWarpDrive)
STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
}
}
示例10: VMMRCDECL
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own current IDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) trpmRCGuestIDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
PVMCPU pVCpu = VMMGetCpu0(pVM);
uint16_t cbIDT;
RTGCPTR GCPtrIDT = (RTGCPTR)CPUMGetGuestIDTR(pVCpu, &cbIDT);
#ifdef VBOX_STRICT
/* cbIDT is the IDTR limit (inclusive), hence the +1 for the end address. */
RTGCPTR GCPtrIDTEnd = (RTGCPTR)((RTGCUINTPTR)GCPtrIDT + cbIDT + 1);
#endif
/* Which IDT gate the faulting write touches. */
uint32_t iGate = ((RTGCUINTPTR)pvFault - (RTGCUINTPTR)GCPtrIDT)/sizeof(VBOXIDTE);
AssertMsg(offRange < (uint32_t)cbIDT+1, ("pvFault=%RGv GCPtrIDT=%RGv-%RGv pvRange=%RGv\n", pvFault, GCPtrIDT, GCPtrIDTEnd, pvRange));
Assert((RTGCPTR)(RTRCUINTPTR)pvRange == GCPtrIDT);
NOREF(uErrorCode);
/* The in-place emulation path below is compiled out; see the notes for why. */
#if 0
/* Note! this causes problems in Windows XP as instructions following the update can be dangerous (str eax has been seen) */
/* Note! not going back to ring 3 could make the code scanner miss them. */
/* Check if we can handle the write here. */
if (    iGate != 3                                  /* Gate 3 is handled differently; could do it here as well, but let ring 3 handle this case for now. */
    &&  !ASMBitTest(&pVM->trpm.s.au32IdtPatched[0], iGate))  /* Passthru gates need special attention too. */
{
uint32_t cb;
int rc = EMInterpretInstructionEx(pVM, pVCpu, pRegFrame, pvFault, &cb);
if (RT_SUCCESS(rc) && cb)
{
/* The write may straddle two gates; clear the trap handler for both. */
uint32_t iGate1 = (offRange + cb - 1)/sizeof(VBOXIDTE);
Log(("trpmRCGuestIDTWriteHandler: write to gate %x (%x) offset %x cb=%d\n", iGate, iGate1, offRange, cb));
trpmClearGuestTrapHandler(pVM, iGate);
if (iGate != iGate1)
trpmClearGuestTrapHandler(pVM, iGate1);
STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTHandled);
return VINF_SUCCESS;
}
}
#else
NOREF(iGate);
#endif
Log(("trpmRCGuestIDTWriteHandler: eip=%RGv write to gate %x offset %x\n", pRegFrame->eip, iGate, offRange));
/** @todo Check which IDT entry and keep the update cost low in TRPMR3SyncIDT() and CSAMCheckGates(). */
/* Defer the work: flag the IDT for resync and let ring-3 emulate the write. */
VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTFault);
return VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT;
}
示例11: pic_update_irq
/* raise irq to CPU if necessary. must be called every time the active
   irq may change.
   Returns VINF_SUCCESS; may recurse once to re-evaluate after clearing a
   spurious master IRQ2. */
static int pic_update_irq(PDEVPIC pThis)
{
PicState *pics = &pThis->aPics[0];
int irq2, irq;
/* first look at slave pic */
irq2 = pic_get_irq(&pics[1]);
Log(("pic_update_irq irq2=%d\n", irq2));
if (irq2 >= 0) {
/* if irq request by slave pic, signal master PIC */
pic_set_irq1(&pics[0], 2, 1);
} else {
/* If not, clear the IR on the master PIC. */
pic_set_irq1(&pics[0], 2, 0);
}
/* look at requested irq */
irq = pic_get_irq(&pics[0]);
if (irq >= 0)
{
/* If irq 2 is pending on the master pic, then there must be one pending on the slave pic too! Otherwise we'll get
 * spurious slave interrupts in picGetInterrupt.
 */
if (irq != 2 || irq2 != -1)
{
#if defined(DEBUG_PIC)
int i;
for(i = 0; i < 2; i++) {
Log(("pic%d: imr=%x irr=%x padd=%d\n",
i, pics[i].imr, pics[i].irr,
pics[i].priority_add));
}
Log(("pic: cpu_interrupt\n"));
#endif
/* Normal case: raise the interrupt-pending flag for the CPU. */
pThis->CTX_SUFF(pPicHlp)->pfnSetInterruptFF(pThis->CTX_SUFF(pDevIns));
}
else
{
/* Master shows IRQ2 but the slave has nothing pending: spurious. */
STAM_COUNTER_INC(&pThis->StatClearedActiveIRQ2);
Log(("pic_update_irq: irq 2 is active, but no interrupt is pending on the slave pic!!\n"));
/* Clear it here, so lower priority interrupts can still be dispatched. */
/* if this was the only pending irq, then we must clear the interrupt ff flag */
pThis->CTX_SUFF(pPicHlp)->pfnClearInterruptFF(pThis->CTX_SUFF(pDevIns));
/** @note Is this correct? */
pics[0].irr &= ~(1 << 2);
/* Call ourselves again just in case other interrupts are pending */
return pic_update_irq(pThis);
}
}
else
{
Log(("pic_update_irq: no interrupt is pending!!\n"));
/* we must clear the interrupt ff flag */
pThis->CTX_SUFF(pPicHlp)->pfnClearInterruptFF(pThis->CTX_SUFF(pDevIns));
}
return VINF_SUCCESS;
}
示例12: DECLINLINE
/**
 * Read the current CPU timestamp counter.
 *
 * Guarantees a monotonically increasing value as seen by the guest on
 * this VCPU (tracked via u64TSCLastSeen).
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
uint64_t u64;
if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
{
PVM pVM = pVCpu->CTX_SUFF(pVM);
/* The TSC source is either the raw host TSC or the virtual clock,
   depending on the configured TSC mode. */
if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
u64 = SUPReadTsc();
else
u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
u64 -= pVCpu->tm.s.offTSCRawSrc;
/* Always return a value higher than what the guest has already seen. */
if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
pVCpu->tm.s.u64TSCLastSeen = u64;
else
{
/* Would go backwards: advance the last-seen value slightly and return it. */
STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
u64 = pVCpu->tm.s.u64TSCLastSeen;
}
}
else
/* Paused: return the frozen TSC value. */
u64 = pVCpu->tm.s.u64TSC;
return u64;
}
示例13: tmCpuTickResumeLocked
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * The first VCPU to resume recomputes the TSC offset from the last paused
 * value and publishes the delta (offTSCPause) for the remaining VCPUs.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
if (!pVCpu->tm.s.fTSCTicking)
{
/* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
pVCpu->tm.s.fTSCTicking = true;
uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
if (c == 1)
{
/* The first VCPU to resume. */
uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;
STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);
/* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
else
pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
- pVM->tm.s.u64LastPausedTSC;
/* Calculate the offset for other VCPUs to use. */
pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
}
else
{
/* All other VCPUs (if any): apply the delta published by the first resumer. */
pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
}
}
return VINF_SUCCESS;
}
示例14: mmPagePoolPtr2Phys
/**
 * Converts a pool address to a physical address.
 * The specified allocation type must match with the address.
 *
 * @returns Physical address.
 * @returns NIL_RTHCPHYS if not found or eType is not matching.
 * @param   pPool   Pointer to the page pool.
 * @param   pv      The address to convert.
 * @thread  The Emulation Thread.
 */
RTHCPHYS mmPagePoolPtr2Phys(PMMPAGEPOOL pPool, void *pv)
{
#ifdef IN_RING3
    VM_ASSERT_EMT(pPool->pVM);
#endif

    /*
     * Find the sub-pool covering the virtual address.
     */
    PMMPPLOOKUPHCPTR pLookup = (PMMPPLOOKUPHCPTR)RTAvlPVGetBestFit(&pPool->pLookupVirt, pv, false);
    if (!pLookup)
        return NIL_RTHCPHYS;

    /* Make sure the address actually falls inside the sub-pool's pages. */
    unsigned iPage = ((char *)pv - (char *)pLookup->pSubPool->pvPages) >> PAGE_SHIFT;
    if (iPage >= pLookup->pSubPool->cPages)
        return NIL_RTHCPHYS;

    /*
     * Convert the virtual address to a physical address.
     */
    STAM_COUNTER_INC(&pPool->cToPhysCalls);
    AssertMsg(    pLookup->pSubPool->paPhysPages[iPage].Phys
              && !(pLookup->pSubPool->paPhysPages[iPage].Phys & PAGE_OFFSET_MASK),
              ("Phys=%#x\n", pLookup->pSubPool->paPhysPages[iPage].Phys));
    AssertMsg((uintptr_t)pLookup->pSubPool == pLookup->pSubPool->paPhysPages[iPage].uReserved,
              ("pSubPool=%p uReserved=%p\n", pLookup->pSubPool, pLookup->pSubPool->paPhysPages[iPage].uReserved));
    return pLookup->pSubPool->paPhysPages[iPage].Phys + ((uintptr_t)pv & PAGE_OFFSET_MASK);
}
示例15: sbappendsb
/*
 * Copy the data from m into sb
 * The caller is responsible to make sure there's enough room
 * (sb is a circular buffer over sb_data..sb_data+sb_datalen with read
 * pointer sb_rptr and write pointer sb_wptr).
 */
void
sbappendsb(PNATState pData, struct sbuf *sb, struct mbuf *m)
{
int len, n, nn;
#ifndef VBOX_WITH_STATISTICS
NOREF(pData);
#endif
len = m_length(m, NULL);
STAM_COUNTER_INC(&pData->StatIOSBAppendSB);
if (sb->sb_wptr < sb->sb_rptr)
{
STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_l_r);
/* Free space is the single contiguous gap between wptr and rptr. */
n = sb->sb_rptr - sb->sb_wptr;
if (n > len)
n = len;
m_copydata(m, 0, n, sb->sb_wptr);
}
else
{
STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_ge_r);
/* Do the right edge first */
n = sb->sb_data + sb->sb_datalen - sb->sb_wptr;
if (n > len)
n = len;
m_copydata(m, 0, n, sb->sb_wptr);
len -= n;
if (len)
{
/* Now the left edge */
nn = sb->sb_rptr - sb->sb_data;
if (nn > len)
nn = len;
m_copydata(m, n, nn, sb->sb_data);
n += nn;
}
}
/* n is the total number of bytes copied; any excess beyond the available
   room is silently dropped (caller is expected to have checked the room). */
sb->sb_cc += n;
sb->sb_wptr += n;
if (sb->sb_wptr >= sb->sb_data + sb->sb_datalen)
{
STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_alter);
/* Wrap the write pointer back to the start of the circular buffer. */
sb->sb_wptr -= sb->sb_datalen;
}
}