This article collects typical usage examples of the C++ method CCallHelpers::JumpList::append. If you are wondering what JumpList::append does, how to call it, or where to find real-world uses of it, the hand-picked code samples below should help. You can also read further about the containing class, CCallHelpers::JumpList.
The following shows 8 code examples of the JumpList::append method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
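Before diving into the examples, here is a minimal sketch of the pattern they all share: branches that should divert to a slow or exceptional path are collected with JumpList::append and later bound to a single target with link. The surrounding generator, the valueRegs/valueGPR registers, and the branch conditions below are illustrative assumptions, not code taken from any of the examples.

// Minimal sketch of the append/link pattern (assumed context: a JSC JIT
// generator with a live CCallHelpers& jit; valueRegs and valueGPR are
// hypothetical registers chosen for illustration).
CCallHelpers::JumpList slowCases;

// Collect every branch that should divert to the slow path.
slowCases.append(jit.branchIfNotInt32(valueRegs));
slowCases.append(jit.branchTest32(CCallHelpers::Zero, valueGPR));

// ... fast path code, executed when none of the branches above were taken ...
CCallHelpers::Jump done = jit.jump();

// Bind every collected jump to the slow path in one place. append also accepts
// another JumpList, which is how Example 2 below merges per-generator lists.
slowCases.link(&jit);
// ... slow path code ...

done.link(&jit);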
Example 1: generateImpl
CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
{
    CCallHelpers::JumpList exceptions;
    // We spill (1) the used registers by IC and (2) the used registers by Snippet.
    AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersBySnippet);

    jit.store32(
        CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
        CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

    jit.makeSpaceOnStackForCCall();

    jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);

    CCallHelpers::Call operationCall = jit.call(OperationPtrTag);
    auto function = m_function;
    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
        linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(function));
    });

    jit.setupResults(m_result);
    jit.reclaimSpaceOnStackForCCall();

    CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);

    state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
    exceptions.append(jit.jump());

    noException.link(&jit);
    RegisterSet dontRestore;
    dontRestore.set(m_result);
    state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);

    return exceptions;
}
Example 2: emitSlowPathCalls
CCallHelpers::JumpList AccessCaseSnippetParams::emitSlowPathCalls(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit)
{
    CCallHelpers::JumpList exceptions;
    for (auto& generator : m_generators)
        exceptions.append(generator->generate(state, usedRegistersBySnippet, jit));
    return exceptions;
}
Example 3: emitSetupVarargsFrameFastCase
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
    CCallHelpers::JumpList end;

    if (argCountRecovery.isConstant()) {
        // FIXME: We could constant-fold a lot of the computation below in this case.
        // https://bugs.webkit.org/show_bug.cgi?id=141486
        jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
    } else
        jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);

    if (firstVarArgOffset) {
        CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
        jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
        CCallHelpers::Jump endVarArgs = jit.jump();
        sufficientArguments.link(&jit);
        jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
        endVarArgs.link(&jit);
    }

    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));

    emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);

    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));

    // Initialize ArgumentCount.
    jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

    // Copy arguments.
    jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
    CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
    // scratchGPR1: argumentCount

    CCallHelpers::Label copyLoop = jit.label();
    int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
    jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
    jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
#else // USE(JSVALUE64), so this begins the 32-bit case
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
#endif // USE(JSVALUE64), end of 32-bit case
    jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);

    done.link(&jit);
}
Example 4: generateFastPath
bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
{
    ASSERT(m_scratchGPR != m_src.payloadGPR());
    ASSERT(m_scratchGPR != m_result.payloadGPR());
    ASSERT(m_scratchGPR != InvalidGPRReg);
#if USE(JSVALUE32_64)
    ASSERT(m_scratchGPR != m_src.tagGPR());
    ASSERT(m_scratchGPR != m_result.tagGPR());
#endif

    jit.moveValueRegs(m_src, m_result);
    CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);

    // -0 should produce a double, and hence cannot be negated as an int.
    // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
    slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));

    jit.neg32(m_result.payloadGPR());
#if USE(JSVALUE64)
    jit.boxInt32(m_result.payloadGPR(), m_result);
#endif
    endJumpList.append(jit.jump());

    srcNotInt.link(&jit);
    slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));

    // For a double, all we need to do is to invert the sign bit.
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
    jit.xor64(m_scratchGPR, m_result.payloadGPR());
#else
    jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
#endif

    // The flags of ArithNegate are basic in DFG.
    // We only need to know if we ever produced a number.
    if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
        arithProfile->emitSetDouble(jit);

    return true;
}
Example 5: virtualForThunkGenerator
static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT2);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT2));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT2, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT2);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT2);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
        GPRInfo::regT1);
#if USE(JSVALUE64)
    jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
    jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
    jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
        JSStack::ScopeChain);
#endif

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
Example 6: virtualForThunkGenerator
static MacroAssemblerCodeRef virtualForThunkGenerator(
    JSGlobalData* globalData, CodeSpecializationKind kind)
{
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate DFG operation.

    CCallHelpers jit(globalData);

    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTestPtr(
            CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(&JSFunction::s_info)));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
        GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branch32(
            CCallHelpers::LessThan,
            CCallHelpers::Address(
                GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
            CCallHelpers::TrustedImm32(0)));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
        GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain));
#else
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    jit.store32(
        CCallHelpers::TrustedImm32(JSValue::CellTag),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT0);

    // Make a tail call. This will return back to DFG code.
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.jump(GPRInfo::regT0);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, globalData, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);

    LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}
Example 7: link
//......... part of the code is omitted here .........
    }

    dumpContext.dump(out, prefix);
    compilation->addDescription(Profiler::OriginStack(), out.toCString());
    out.reset();

    out.print(" Disassembly:\n");
#if FTL_USES_B3
    out.print(" <not implemented yet>\n");
#else
    for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
        if (state.codeSectionNames[i] != SECTION_NAME("text"))
            continue;

        ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
        disassemble(
            MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
            " ", out, LLVMSubset);
    }
#endif
    compilation->addDescription(Profiler::OriginStack(), out.toCString());
    out.reset();

    state.jitCode->common.compilation = compilation;
}

switch (graph.m_plan.mode) {
case FTLMode: {
    CCallHelpers::JumpList mainPathJumps;

    jit.load32(
        frame.withOffset(sizeof(Register) * JSStack::ArgumentCount),
        GPRInfo::regT1);
    mainPathJumps.append(jit.branch32(
        CCallHelpers::AboveOrEqual, GPRInfo::regT1,
        CCallHelpers::TrustedImm32(codeBlock->numParameters())));
    jit.emitFunctionPrologue();
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    jit.store32(
        CCallHelpers::TrustedImm32(CallSiteIndex(0).bits()),
        CCallHelpers::tagFor(JSStack::ArgumentCount));
    jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
    CCallHelpers::Call callArityCheck = jit.call();
#if !ASSERT_DISABLED
    // FIXME: need to make this call register with exception handling somehow. This is
    // part of a bigger problem: FTL should be able to handle exceptions.
    // https://bugs.webkit.org/show_bug.cgi?id=113622
    // Until then, use a JIT ASSERT.
    jit.load64(vm.addressOfException(), GPRInfo::regT1);
    jit.jitAssertIsNull(GPRInfo::regT1);
#endif
    jit.move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    jit.emitFunctionEpilogue();
    mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::argumentGPR0));
    jit.emitFunctionPrologue();
    CCallHelpers::Call callArityFixup = jit.call();
    jit.emitFunctionEpilogue();
    mainPathJumps.append(jit.jump());

    linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        state.allocationFailed = true;
        return;
    }
    linkBuffer->link(callArityCheck, codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
    linkBuffer->link(callArityFixup, FunctionPtr((vm.getCTIStub(arityFixupGenerator)).code().executableAddress()));
Example 8: virtualThunkFor
// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}