This page collects typical usage examples of the C++ method CCallHelpers::codeBlock. If you are wondering what CCallHelpers::codeBlock does, how to call it, or want to see it used in context, the curated examples below should help. You can also explore further usage examples of the CCallHelpers class itself.
The following shows 6 code examples of CCallHelpers::codeBlock (all drawn from WebKit's JavaScriptCore JIT), sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ examples.
Example 1: adjustAndJumpToTarget
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
        }
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}
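This example jumps from a DFG OSR exit back into baseline code: it uses jit.codeBlock() to emit write barriers for the optimized code block's owner and every inlined executable, then locates the baseline machine-code address for the exit's bytecode index. The lookup works because the decoded code map is sorted by bytecode index. Below is a minimal, self-contained sketch of that lookup in plain C++ (illustrative names, std::lower_bound standing in for WebKit's binarySearch helper):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct BytecodeAndMachineOffset {
    unsigned bytecodeIndex;     // index into the baseline bytecode stream
    uint32_t machineCodeOffset; // offset into the baseline JIT's machine code
};

// The map is sorted by bytecodeIndex, so the exit's target offset can be
// found with a binary search, exactly as adjustAndJumpToTarget does.
uint32_t machineOffsetFor(const std::vector<BytecodeAndMachineOffset>& map, unsigned bytecodeIndex)
{
    auto it = std::lower_bound(
        map.begin(), map.end(), bytecodeIndex,
        [](const BytecodeAndMachineOffset& entry, unsigned index) {
            return entry.bytecodeIndex < index;
        });
    assert(it != map.end() && it->bytecodeIndex == bytecodeIndex);
    return it->machineCodeOffset;
}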
Example 2: handleExitCounts
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
    jit.call(GPRInfo::regT1);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue =
        ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
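The counter arithmetic at the end is easy to miss: the code stores the negated clipped threshold into the baseline code block's execute counter, so the counter can simply count upward and the reoptimizeNow branch (GreaterThanOrEqual against zero) fires once enough further executions have accumulated. A self-contained sketch of that scheme, with a hypothetical threshold value:

#include <cstdint>
#include <iostream>

struct ExecuteCounter {
    int32_t counter = 0;

    // Mirrors the store32(TrustedImm32(-clippedValue), ...) in the example:
    // seed with the negated threshold so the counter climbs toward zero.
    void seed(int32_t clippedThreshold) { counter = -clippedThreshold; }

    // Mirrors the GreaterThanOrEqual-zero branch: true means "reoptimize now".
    bool countExecution() { return ++counter >= 0; }
};

int main()
{
    ExecuteCounter c;
    c.seed(1000); // hypothetical clipped threshold
    int executions = 1;
    while (!c.countExecution())
        ++executions;
    std::cout << "reoptimization triggered after " << executions << " executions\n";
}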
Example 3: emit
void JSCallBase::emit(CCallHelpers& jit)
{
    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();

    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, GPRInfo::regT0, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));

    m_fastCall = jit.nearCall();
    CCallHelpers::Jump done = jit.jump();

    slowPath.link(&jit);

    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
    m_slowCall = jit.nearCall();

    done.link(&jit);
}
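The pattern here is JSC's patchable call site: branchPtrWithPatch emits a comparison against an immediate that starts out null, so the first call always falls through to the slow path, which (via the CallLinkInfo handed over in regT2) links the site; once patched, a matching callee takes the fast nearCall. A conceptual, self-contained sketch of that linking idea in plain C++ (not WebKit's API; the struct and its fields are illustrative):

#include <cstdio>

void calleeA() { std::puts("A"); }
void calleeB() { std::puts("B"); }

struct CallSite {
    void (*cachedCallee)() = nullptr; // plays the role of the patchable immediate

    void call(void (*callee)())
    {
        if (callee == cachedCallee) { // fast path: branchPtrWithPatch compares equal
            callee();                 // m_fastCall = jit.nearCall()
            return;
        }
        // Slow path: "link" the site to this callee (what the CallLinkInfo
        // machinery arranges through the slow call), then proceed.
        cachedCallee = callee;
        callee();
    }
};

int main()
{
    CallSite site;
    site.call(calleeA); // first call: slow path links the site
    site.call(calleeA); // same callee again: fast path
    site.call(calleeB); // different callee: relinked via the slow path
}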
Example 4: emit
void JSCallBase::emit(CCallHelpers& jit, State& /*state*/, int32_t osrExitFromGenericUnwindStackSpillSlot)
{
    RELEASE_ASSERT(!!m_callSiteIndex);

#if FTL_USES_B3
    UNUSED_PARAM(osrExitFromGenericUnwindStackSpillSlot);
#else // FTL_USES_B3
    if (m_correspondingGenericUnwindOSRExit)
        m_correspondingGenericUnwindOSRExit->spillRegistersToSpillSlot(jit, osrExitFromGenericUnwindStackSpillSlot);
#endif // FTL_USES_B3

    jit.store32(CCallHelpers::TrustedImm32(m_callSiteIndex.bits()), CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));

    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();

    if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail)
        jit.emitRestoreCalleeSaves();

    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, GPRInfo::regT0, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));

    CCallHelpers::Jump done;

    if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail) {
        jit.prepareForTailCallSlow();
        m_fastCall = jit.nearTailCall();
    } else {
        m_fastCall = jit.nearCall();
        done = jit.jump();
    }

    slowPath.link(&jit);

    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
    m_slowCall = jit.nearCall();

    if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail)
        jit.abortWithReason(JITDidReturnFromTailCall);
    else
        done.link(&jit);

    m_callLinkInfo->setUpCall(m_type, m_semanticeOrigin, GPRInfo::regT0);
}
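Compared with example 3, the tail-call mode restores the caller's callee-saved registers before the check, emits nearTailCall instead of nearCall on the fast path, and has no done label after the slow call: a tail call never comes back, so the code plants abortWithReason(JITDidReturnFromTailCall) to trap if it somehow does. A minimal sketch of the control-flow difference being encoded (plain C++, purely illustrative):

int callee(int x) { return x + 1; }

// Regular call: control returns to the caller, which is why example 4 emits a
// "done" jump/label after the fast nearCall.
int regularCall(int x)
{
    int result = callee(x);
    return result; // execution resumes here after the call
}

// Tail call: the caller's frame is replaced, so in the JIT's tail-call form
// nothing after the call can ever run. The JIT makes that explicit by placing
// abortWithReason(JITDidReturnFromTailCall) where a "done" label would be.
int tailCall(int x)
{
    return callee(x); // the caller's frame is dead past this point
}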
Example 5: handleExitCounts
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
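What this later version of handleExitCounts adds over example 2 is loop awareness: before comparing the exit count against a threshold, it walks the inline stack and, if any enclosing executable ever tried to OSR-enter a loop, switches to the from-loop reoptimization threshold. The real code emits this selection as branches inside JIT-compiled code; the sketch below runs the same decision at ordinary C++ level, with illustrative names:

#include <cstdint>
#include <vector>

struct InlineFrame {
    bool didTryToEnterInLoop; // mirrors addressOfDidTryToEnterInLoop()
};

// Walk the inline stack, as the emitted branchTest8 loop does, and pick the
// appropriate exit-count threshold.
int32_t exitThresholdFor(const std::vector<InlineFrame>& inlineStack,
                         int32_t normalThreshold, int32_t fromLoopThreshold)
{
    for (const InlineFrame& frame : inlineStack) {
        if (frame.didTryToEnterInLoop)
            return fromLoopThreshold; // exitCountThresholdForReoptimizationFromLoop()
    }
    return normalThreshold; // exitCountThresholdForReoptimization()
}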
Example 6: emit
void JSTailCall::emit(JITCode& jitCode, CCallHelpers& jit)
{
    StackMaps::Record* record { nullptr };

    for (unsigned i = jitCode.stackmaps.records.size(); i--;) {
        record = &jitCode.stackmaps.records[i];
        if (record->patchpointID == m_stackmapID)
            break;
    }

    RELEASE_ASSERT(record->patchpointID == m_stackmapID);

    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();

    CallFrameShuffleData shuffleData;

    // The callee was the first passed argument, and must be in a GPR because
    // we used the "anyregcc" calling convention
    auto calleeLocation =
        FTL::Location::forStackmaps(nullptr, record->locations[0]);
    GPRReg calleeGPR = calleeLocation.directGPR();
    shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);

    // The tag type number was the second argument, if there was one
    auto tagTypeNumberLocation =
        FTL::Location::forStackmaps(&jitCode.stackmaps, record->locations[1]);
    if (tagTypeNumberLocation.isGPR() && !tagTypeNumberLocation.addend())
        shuffleData.tagTypeNumber = tagTypeNumberLocation.directGPR();

    shuffleData.args.grow(numArguments());
    HashMap<Reg, Vector<std::pair<ValueRecovery*, int32_t>>> withAddend;
    size_t numAddends { 0 };
    for (size_t i = 0; i < numArguments(); ++i) {
        shuffleData.args[i] = recoveryFor(m_arguments[i], *record, jitCode.stackmaps);
        if (FTL::Location addend = getRegisterWithAddend(m_arguments[i], *record, jitCode.stackmaps)) {
            withAddend.add(
                addend.reg(),
                Vector<std::pair<ValueRecovery*, int32_t>>()).iterator->value.append(
                    std::make_pair(&shuffleData.args[i], addend.addend()));
            numAddends++;
        }
    }

    numAddends = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numAddends);

    shuffleData.numLocals = static_cast<int64_t>(jitCode.stackmaps.stackSizeForLocals()) / sizeof(void*) + numAddends;

    ASSERT(!numAddends == withAddend.isEmpty());

    if (!withAddend.isEmpty()) {
        jit.subPtr(MacroAssembler::TrustedImm32(numAddends * sizeof(void*)), MacroAssembler::stackPointerRegister);
        VirtualRegister spillBase { 1 - static_cast<int>(shuffleData.numLocals) };
        for (auto entry : withAddend) {
            for (auto pair : entry.value) {
                ASSERT(numAddends > 0);
                VirtualRegister spillSlot { spillBase + --numAddends };
                ASSERT(entry.key.isGPR());
                jit.addPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
                jit.storePtr(entry.key.gpr(), CCallHelpers::addressFor(spillSlot));
                jit.subPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
                *pair.first = ValueRecovery::displacedInJSStack(spillSlot, pair.first->dataFormat());
            }
        }
        ASSERT(numAddends < stackAlignmentRegisters());
    }

    shuffleData.args.resize(numArguments());

    for (size_t i = 0; i < numArguments(); ++i)
        shuffleData.args[i] = recoveryFor(m_arguments[i], *record, jitCode.stackmaps);

    shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, calleeGPR, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));

    m_callLinkInfo->setFrameShuffleData(shuffleData);
    CallFrameShuffler(jit, shuffleData).prepareForTailCall();

    m_fastCall = jit.nearTailCall();

    slowPath.link(&jit);

    CallFrameShuffler slowPathShuffler(jit, shuffleData);
    slowPathShuffler.setCalleeJSValueRegs(JSValueRegs { GPRInfo::regT0 });
    slowPathShuffler.prepareForSlowPath();

    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
    m_slowCall = jit.nearCall();

    jit.abortWithReason(JITDidReturnFromTailCall);

    m_callLinkInfo->setUpCall(m_type, m_semanticeOrigin, calleeGPR);
}
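Before anything else, emit() must find its own stackmap record: the FTL compiles many patchpoints into one blob of stackmaps, so the records are scanned in reverse for the one whose patchpoint ID matches this call's stackmap ID. A minimal, self-contained sketch of that lookup (the field name mirrors the example, but the surrounding types are illustrative):

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <vector>

struct Record {
    uint32_t patchpointID;
    // ... value locations for callee, arguments, etc. would live here ...
};

// Reverse scan, matching the `for (unsigned i = records.size(); i--;)` loop
// at the top of example 6; the RELEASE_ASSERT there plays the role of this
// assert.
const Record& recordFor(const std::vector<Record>& records, uint32_t stackmapID)
{
    for (size_t i = records.size(); i--;) {
        if (records[i].patchpointID == stackmapID)
            return records[i];
    }
    assert(false && "no stackmap record for this patchpoint ID");
    std::abort();
}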