This article collects typical usage examples of the C++ CCallHelpers class (WebKit's JavaScriptCore JIT assembler helper). If you are wondering what CCallHelpers is for, how to use it, or what real code that uses it looks like, the hand-picked examples below should help.
The following shows 15 code examples of the CCallHelpers class, sorted by popularity by default.
Example 1: emitReportValue
void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueRegs regs) const
{
    switch (m_kind) {
    case None:
        return;

    case Ready:
        jit.storeValue(regs, u.profile->specFailBucket(0));
        return;

    case LazyOperand: {
        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
        ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
        LazyOperandValueProfile* profile =
            u.lazyOperand.codeBlock->lazyOperandValueProfiles(locker).add(locker, key);
        jit.storeValue(regs, profile->specFailBucket(0));
        return;
    }

    case ArithProfileReady: {
        u.arithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
        return;
    }
    }

    RELEASE_ASSERT_NOT_REACHED();
}
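Whichever profile is selected, the observed value ends up written into the profile's first spec-failure bucket. A minimal standalone sketch of that idea, using a made-up ToyValueProfile type (the real ValueProfile is considerably more involved):
#include <array>
#include <cstdint>
#include <cstdio>

// Toy stand-in for a value profile: the only behaviour modelled here is that
// reporting a value writes it into bucket 0, mirroring specFailBucket(0) above.
struct ToyValueProfile {
    std::array<uint64_t, 8> buckets { };
    uint64_t* specFailBucket(size_t i) { return &buckets[i]; }
};

static void reportValue(ToyValueProfile& profile, uint64_t encodedValue)
{
    // Counterpart of jit.storeValue(regs, profile->specFailBucket(0)).
    *profile.specFailBucket(0) = encodedValue;
}

int main()
{
    ToyValueProfile profile;
    reportValue(profile, 42);
    std::printf("bucket0=%llu\n", (unsigned long long)profile.buckets[0]);
    return 0;
}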
Example 2: emitSetVarargsFrame
void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
{
    jit.move(numUsedSlotsGPR, resultGPR);
    // We really want to make sure the size of the new call frame is a multiple of
    // stackAlignmentRegisters(); it is easier to accomplish this by
    // rounding numUsedSlotsGPR to the next multiple of stackAlignmentRegisters().
    // Together with the rounding below, this ensures that the new call frame is
    // located on a stackAlignmentRegisters() boundary and a multiple of
    // stackAlignmentRegisters() in size.
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);

    jit.addPtr(lengthGPR, resultGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis ? 0 : 1)), resultGPR);

    // resultGPR now has the required frame size in Register units.
    // Round resultGPR to the next multiple of stackAlignmentRegisters().
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
    // Now resultGPR has the right stack frame offset in Register units.
    jit.negPtr(resultGPR);
    jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR);
    jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
}
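The two addPtr/andPtr pairs are the classic "round up to the next multiple of a power of two" idiom, and the final left shift by 3 converts Register units into bytes (8 bytes per Register here). A minimal standalone sketch of the rounding arithmetic, with a hypothetical roundUpToMultipleOf helper name and an assumed alignment of 2:
#include <cassert>
#include <cstdint>

// Round value up to the next multiple of alignment, which must be a power of two
// (stackAlignmentRegisters() plays that role above).
static uint64_t roundUpToMultipleOf(uint64_t alignment, uint64_t value)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

int main()
{
    const uint64_t alignment = 2; // e.g. stackAlignmentRegisters()
    assert(roundUpToMultipleOf(alignment, 5) == 6);
    assert(roundUpToMultipleOf(alignment, 6) == 6);
    return 0;
}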
Example 3: emitExecuteCode
void BasicBlockLocation::emitExecuteCode(CCallHelpers& jit, MacroAssembler::RegisterID scratch) const
{
    static_assert(sizeof(size_t) == 4, "Assuming size_t is 32 bits on 32 bit platforms.");
    jit.load32(&m_executionCount, scratch);
    // Saturating increment: if the add wraps the counter around to zero, skip the
    // store so the recorded execution count stays pinned at its maximum value.
    CCallHelpers::Jump done = jit.branchAdd32(CCallHelpers::Zero, scratch, CCallHelpers::TrustedImm32(1), scratch);
    jit.store32(scratch, bitwise_cast<void*>(&m_executionCount));
    done.link(&jit);
}
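The branchAdd32(Zero, ...) trick gives a saturating counter without an explicit compare. A standalone sketch of the same behaviour in plain C++ (saturatingIncrement is a made-up name for illustration):
#include <cassert>
#include <cstdint>

// Increment a 32-bit counter, but leave it untouched once it has reached
// UINT32_MAX -- mirroring "skip the store when the add wraps to zero".
static void saturatingIncrement(uint32_t& counter)
{
    uint32_t incremented = counter + 1;
    if (incremented == 0)
        return; // overflowed: keep the counter saturated at UINT32_MAX
    counter = incremented;
}

int main()
{
    uint32_t count = UINT32_MAX - 1;
    saturatingIncrement(count); // reaches UINT32_MAX
    saturatingIncrement(count); // stays at UINT32_MAX
    assert(count == UINT32_MAX);
    return 0;
}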
Example 4: dispatch
static void dispatch(CCallHelpers& jit, FTL::State* state, const B3::StackmapGenerationParams& params, DFG::Node* node, Box<CCallHelpers::JumpList> exceptions, CCallHelpers::JumpList from, OperationType operation, ResultType result, Arguments arguments, std::index_sequence<ArgumentsIndex...>)
{
    CCallHelpers::Label done = jit.label();
    params.addLatePath([=] (CCallHelpers& jit) {
        AllowMacroScratchRegisterUsage allowScratch(jit);
        from.link(&jit);
        callOperation(
            *state, params.unavailableRegisters(), jit, node->origin.semantic,
            exceptions.get(), operation, extractResult(result), std::get<ArgumentsIndex>(arguments)...);
        jit.jump().linkTo(done, &jit);
    });
}
Example 5: emitPointerValidation
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
#if !ASSERT_DISABLED
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.breakpoint();
    isNonZero.link(&jit);

    jit.push(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.pop(pointerGPR);
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
#endif
}
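In assertion-enabled builds the snippet traps on a null pointer and then loads the first byte, so an unmapped pointer faults right at the validation site instead of at some distant use. A rough standalone analogue in plain C++ (validatePointer is an invented name, not a JavaScriptCore API):
#include <cassert>
#include <cstdint>

// Debug-only pointer sanity check: fail fast on null, then probe the first
// byte so a wild pointer crashes here rather than later.
static void validatePointer(const void* pointer)
{
#ifndef NDEBUG
    assert(pointer); // counterpart of the branchTestPtr / breakpoint pair
    volatile uint8_t probe = *static_cast<const uint8_t*>(pointer); // counterpart of the load8
    (void)probe;
#else
    (void)pointer;
#endif
}

int main()
{
    int value = 7;
    validatePointer(&value);
    return 0;
}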
Example 6: generateImpl
CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
{
    CCallHelpers::JumpList exceptions;

    // We spill (1) the registers used by the IC and (2) the registers used by the snippet.
    AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersBySnippet);

    jit.store32(
        CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
        CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

    jit.makeSpaceOnStackForCCall();
    jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);

    CCallHelpers::Call operationCall = jit.call(OperationPtrTag);
    auto function = m_function;
    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
        linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(function));
    });

    jit.setupResults(m_result);
    jit.reclaimSpaceOnStackForCCall();

    CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);
    state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
    exceptions.append(jit.jump());

    noException.link(&jit);
    RegisterSet dontRestore;
    dontRestore.set(m_result);
    state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);

    return exceptions;
}
Example 7: emitObserveResult
void ArithProfile::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
{
    if (!shouldEmitSetDouble() && !shouldEmitSetNonNumber())
        return;

    CCallHelpers::Jump isInt32 = jit.branchIfInt32(regs, mode);
    CCallHelpers::Jump notDouble = jit.branchIfNotDoubleKnownNotInt32(regs, mode);
    emitSetDouble(jit);
    CCallHelpers::Jump done = jit.jump();

    notDouble.link(&jit);
    emitSetNonNumber(jit);

    done.link(&jit);
    isInt32.link(&jit);
}
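The three jumps encode a simple three-way classification of the observed result. A standalone C++ restatement of the same control flow, using a made-up ObservedValue type and toy flag fields purely for illustration:
#include <cstdio>

// Toy stand-ins for the profile flags and the value classification;
// none of these names come from JavaScriptCore.
enum class ObservedValue { Int32, Double, NonNumber };

struct ToyArithProfile {
    bool sawDouble = false;
    bool sawNonNumber = false;

    void observeResult(ObservedValue value)
    {
        if (value == ObservedValue::Int32)
            return;              // isInt32 branch: nothing to record
        if (value == ObservedValue::Double)
            sawDouble = true;    // fall-through path: emitSetDouble
        else
            sawNonNumber = true; // notDouble target: emitSetNonNumber
    }
};

int main()
{
    ToyArithProfile profile;
    profile.observeResult(ObservedValue::Double);
    std::printf("sawDouble=%d sawNonNumber=%d\n", profile.sawDouble, profile.sawNonNumber);
    return 0;
}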
Example 8: generate
CCallHelpers::JumpList generate(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit) override
{
    m_from.link(&jit);
    CCallHelpers::JumpList exceptions = generateImpl(state, usedRegistersBySnippet, jit, std::make_index_sequence<std::tuple_size<std::tuple<Arguments...>>::value>());
    jit.jump().linkTo(m_to, &jit);
    return exceptions;
}
Example 9: jumps
Box<CCallHelpers::JumpList> ExceptionTarget::jumps(CCallHelpers& jit)
{
    Box<CCallHelpers::JumpList> result = Box<CCallHelpers::JumpList>::create();
    if (m_isDefaultHandler) {
        Box<CCallHelpers::Label> defaultHandler = m_defaultHandler;
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(*result, linkBuffer.locationOf(*defaultHandler));
            });
    } else {
        RefPtr<OSRExitHandle> handle = m_handle;
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(*result, linkBuffer.locationOf(handle->label));
            });
    }
    return result;
}
Example 10: storeCodeOrigin
void storeCodeOrigin(State& state, CCallHelpers& jit, CodeOrigin codeOrigin)
{
    if (!codeOrigin.isSet())
        return;

    unsigned index = state.jitCode->common.addCodeOrigin(codeOrigin);
    unsigned locationBits = CallFrame::Location::encodeAsCodeOriginIndex(index);
    jit.store32(
        CCallHelpers::TrustedImm32(locationBits),
        CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
}
Example 11: emitExitThunk
void OSRExitHandle::emitExitThunk(State& state, CCallHelpers& jit)
{
    Profiler::Compilation* compilation = state.graph.compilation();

    CCallHelpers::Label myLabel = jit.label();
    label = myLabel;
    jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(index));
    CCallHelpers::PatchableJump jump = jit.patchableJump();
    RefPtr<OSRExitHandle> self = this;
    VM& vm = state.vm();
    jit.addLinkTask(
        [self, jump, myLabel, compilation, &vm] (LinkBuffer& linkBuffer) {
            self->exit.m_patchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(jump));
            linkBuffer.link(
                jump.m_jump,
                CodeLocationLabel<JITThunkPtrTag>(vm.getCTIStub(osrExitGenerationThunkGenerator).code()));
            if (compilation)
                compilation->addOSRExitSite({ linkBuffer.locationOf<JSInternalPtrTag>(myLabel) });
        });
}
Example 12: generate
CCallHelpers::Jump CCallSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext&)
{
    switch (inst.args[calleeArgOffset].kind()) {
    case Arg::Imm:
    case Arg::Imm64:
        jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
        jit.call(scratchRegister);
        break;
    case Arg::Tmp:
        jit.call(inst.args[calleeArgOffset].gpr());
        break;
    case Arg::Addr:
        jit.call(inst.args[calleeArgOffset].asAddress());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    return CCallHelpers::Jump();
}
Example 13: adjustAndJumpToTarget
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF(" -> %p\n", jumpTarget);
#endif
}
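The binarySearch call maps the exit's bytecode index to a machine-code offset in the baseline code block. A standalone sketch of the same lookup using std::lower_bound over a sorted table (the struct and field names below are stand-ins, not JavaScriptCore's):
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Toy version of the bytecode-index -> machine-code-offset table.
struct OffsetMapping {
    unsigned bytecodeIndex;
    uint32_t machineCodeOffset;
};

// Find the entry whose bytecodeIndex matches exactly; keeping the table
// sorted by bytecodeIndex is what makes the binary search valid.
static const OffsetMapping* findMapping(const std::vector<OffsetMapping>& map, unsigned bytecodeIndex)
{
    auto it = std::lower_bound(map.begin(), map.end(), bytecodeIndex,
        [](const OffsetMapping& entry, unsigned key) { return entry.bytecodeIndex < key; });
    if (it == map.end() || it->bytecodeIndex != bytecodeIndex)
        return nullptr;
    return &*it;
}

int main()
{
    std::vector<OffsetMapping> map = { { 0, 0x10 }, { 7, 0x40 }, { 12, 0x9c } };
    const OffsetMapping* mapping = findMapping(map, 7);
    assert(mapping && mapping->machineCodeOffset == 0x40);
    return 0;
}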
Example 14: callOperation
MacroAssembler::Call callOperation(
    State& state, const RegisterSet& usedRegisters, CCallHelpers& jit,
    CodeOrigin codeOrigin, MacroAssembler::JumpList* exceptionTarget,
    V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg value,
    GPRReg object, StringImpl* uid)
{
    storeCodeOrigin(state, jit, codeOrigin);
    CallContext context(state, usedRegisters, jit, 5, InvalidGPRReg);
    jit.setupArgumentsWithExecState(
        CCallHelpers::TrustedImmPtr(stubInfo), value, object,
        CCallHelpers::TrustedImmPtr(uid));
    return context.makeCall(bitwise_cast<void*>(operation), exceptionTarget);
}
Example 15: generateFastPath
bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
{
    ASSERT(m_scratchGPR != m_src.payloadGPR());
    ASSERT(m_scratchGPR != m_result.payloadGPR());
    ASSERT(m_scratchGPR != InvalidGPRReg);
#if USE(JSVALUE32_64)
    ASSERT(m_scratchGPR != m_src.tagGPR());
    ASSERT(m_scratchGPR != m_result.tagGPR());
#endif

    jit.moveValueRegs(m_src, m_result);
    CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);

    // -0 should produce a double, and hence cannot be negated as an int.
    // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
    slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));

    jit.neg32(m_result.payloadGPR());
#if USE(JSVALUE64)
    jit.boxInt32(m_result.payloadGPR(), m_result);
#endif
    endJumpList.append(jit.jump());

    srcNotInt.link(&jit);
    slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));

    // For a double, all we need to do is to invert the sign bit.
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
    jit.xor64(m_scratchGPR, m_result.payloadGPR());
#else
    jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
#endif

    // The flags of ArithNegate are basic in DFG.
    // We only need to know if we ever produced a number.
    if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
        arithProfile->emitSetDouble(jit);
    return true;
}
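On the JSVALUE64 path the double is negated purely by flipping bit 63 of its encoding. A standalone sketch of the same bit trick (negateBySignBit is a made-up name), which also shows why integer 0 must take the slow path: its negation is the double -0.0:
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Negate a double by XOR-ing its sign bit, mirroring the xor64 with 1ull << 63 above.
static double negateBySignBit(double value)
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    bits ^= 1ull << 63;
    double negated;
    std::memcpy(&negated, &bits, sizeof negated);
    return negated;
}

int main()
{
    assert(negateBySignBit(2.5) == -2.5);
    // Negating 0.0 yields -0.0, a value with no int32 representation -- which is
    // why the fast path bails out before negating an integer zero.
    assert(std::signbit(negateBySignBit(0.0)));
    return 0;
}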