This article collects typical usage examples of the C++ class CCallHelpers::JumpList as it appears in WebKit's JavaScriptCore. If you are wondering what JumpList is for, how it is used, or what real code that uses it looks like, the selected examples here should help.
Twelve code examples of the JumpList class are shown below, all taken from the WebKit source tree.
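Before the examples, a quick orientation: CCallHelpers::JumpList is simply a growable collection of unresolved jumps emitted by the macro assembler. Code appends branches to it while emitting a fast path and later binds every collected jump to one location with link(). The following minimal sketch distills that pattern from the examples below; the jit object, the registers, and the particular branch conditions are placeholders, not a fixed API.

CCallHelpers::JumpList slowCase;

// Record any branch whose taken edge should divert to the slow path.
slowCase.append(jit.branchIfNotInt32(valueRegs));               // valueRegs: some JSValueRegs (placeholder)
slowCase.append(jit.branchTest32(CCallHelpers::Zero, someGPR)); // someGPR: some scratch register (placeholder)

// ... emit the fast path ...
CCallHelpers::Jump done = jit.jump();

// Every jump appended above now targets the code emitted after link().
slowCase.link(&jit);
// ... emit the slow path ...

done.link(&jit);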
Example 1: generateImpl
CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
{
CCallHelpers::JumpList exceptions;
// We spill (1) the registers used by the IC and (2) the registers used by the Snippet.
AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersBySnippet);
jit.store32(
CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
jit.makeSpaceOnStackForCCall();
jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);
CCallHelpers::Call operationCall = jit.call(OperationPtrTag);
auto function = m_function;
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(function));
});
jit.setupResults(m_result);
jit.reclaimSpaceOnStackForCCall();
CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);
state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
exceptions.append(jit.jump());
noException.link(&jit);
RegisterSet dontRestore;
dontRestore.set(m_result);
state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
return exceptions;
}
Example 2: emitSlowPathCalls
CCallHelpers::JumpList AccessCaseSnippetParams::emitSlowPathCalls(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit)
{
CCallHelpers::JumpList exceptions;
for (auto& generator : m_generators)
exceptions.append(generator->generate(state, usedRegistersBySnippet, jit));
return exceptions;
}
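Note that emitSlowPathCalls does not link the exception jumps itself; the caller binds the returned JumpList, as the tail of Example 7 below does. A sketch of that caller-side pattern (the params, state, and jit objects are the caller's own):

CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
if (!exceptions.empty()) {
    exceptions.link(&jit);                // all recorded exception jumps land here
    state.emitExplicitExceptionHandler(); // then hand off to the generic handler
}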
Example 3: emitSetupVarargsFrameFastCase
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
CCallHelpers::JumpList end;
if (argCountRecovery.isConstant()) {
// FIXME: We could constant-fold a lot of the computation below in this case.
// https://bugs.webkit.org/show_bug.cgi?id=141486
jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
} else
jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
if (firstVarArgOffset) {
CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
CCallHelpers::Jump endVarArgs = jit.jump();
sufficientArguments.link(&jit);
jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
endVarArgs.link(&jit);
}
slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));
emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);
slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));
// Initialize ArgumentCount.
jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));
// Copy arguments.
jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
// scratchGPR1: argumentCount
CCallHelpers::Label copyLoop = jit.label();
int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
#else // USE(JSVALUE64), so this begins the 32-bit case
jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
#endif // USE(JSVALUE64), end of 32-bit case
jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);
done.link(&jit);
}
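Example 3 shows the other common way a JumpList crosses a function boundary: the caller owns slowCase and passes it by reference, the helper only appends to it, and the caller decides where those jumps land. Example 6 below uses this helper in exactly that way; here is a condensed sketch of the caller side (register names and the slow-path body are illustrative only):

CCallHelpers::JumpList slowCase;
emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3,
    argCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase);
CCallHelpers::Jump done = jit.jump();   // fast path skips the out-of-line code
slowCase.link(&jit);
// ... emit the out-of-line slow path, e.g. a stack-overflow throw ...
done.link(&jit);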
Example 4: generateFastPath
bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
{
ASSERT(m_scratchGPR != m_src.payloadGPR());
ASSERT(m_scratchGPR != m_result.payloadGPR());
ASSERT(m_scratchGPR != InvalidGPRReg);
#if USE(JSVALUE32_64)
ASSERT(m_scratchGPR != m_src.tagGPR());
ASSERT(m_scratchGPR != m_result.tagGPR());
#endif
jit.moveValueRegs(m_src, m_result);
CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);
// -0 should produce a double, and hence cannot be negated as an int.
// The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));
jit.neg32(m_result.payloadGPR());
#if USE(JSVALUE64)
jit.boxInt32(m_result.payloadGPR(), m_result);
#endif
endJumpList.append(jit.jump());
srcNotInt.link(&jit);
slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));
// For a double, all we need to do is to invert the sign bit.
#if USE(JSVALUE64)
jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
jit.xor64(m_scratchGPR, m_result.payloadGPR());
#else
jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
#endif
// The flags of ArithNegate are basic in DFG.
// We only need to know if we ever produced a number.
if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
arithProfile->emitSetDouble(jit);
return true;
}
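Two bit tricks in Example 4 are worth unpacking. The single branchTest32 against 0x7fffffff sends exactly 0 and 0x80000000 (INT32_MIN) to the slow path, which are precisely the int32 inputs whose negation is not representable as an int32 (-0 must be a double, and -INT32_MIN overflows). On the double path, negation really is just flipping the IEEE 754 sign bit. A small standalone C++ check of both facts (ordinary host code, not JIT code):

#include <cassert>
#include <cstdint>
#include <cstring>

int main()
{
    // The mask test: only 0 and INT32_MIN have no bits set inside 0x7fffffff,
    // and those are exactly the values that cannot be negated as an int32.
    auto needsSlowPath = [](int32_t payload) {
        return (payload & 0x7fffffff) == 0;
    };
    assert(needsSlowPath(0));            // -0 must become a double
    assert(needsSlowPath(INT32_MIN));    // -INT32_MIN overflows int32
    assert(!needsSlowPath(1) && !needsSlowPath(-1));

    // Negating a double by flipping its sign bit, as the JSVALUE64 path does.
    double value = 42.5;
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits ^= 1ull << 63;
    std::memcpy(&value, &bits, sizeof(value));
    assert(value == -42.5);
    return 0;
}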
Example 5: dispatch
static void dispatch(CCallHelpers& jit, FTL::State* state, const B3::StackmapGenerationParams& params, DFG::Node* node, Box<CCallHelpers::JumpList> exceptions, CCallHelpers::JumpList from, OperationType operation, ResultType result, Arguments arguments, std::index_sequence<ArgumentsIndex...>)
{
CCallHelpers::Label done = jit.label();
params.addLatePath([=] (CCallHelpers& jit) {
AllowMacroScratchRegisterUsage allowScratch(jit);
from.link(&jit);
callOperation(
*state, params.unavailableRegisters(), jit, node->origin.semantic,
exceptions.get(), operation, extractResult(result), std::get<ArgumentsIndex>(arguments)...);
jit.jump().linkTo(done, &jit);
});
}
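A detail of Example 5: the late path runs long after dispatch returns, so the exception list is passed as a Box<CCallHelpers::JumpList>, presumably so that appends made inside the deferred lambda remain visible to the code that eventually links the list, while the already-emitted from jumps can safely be captured by value. A hedged sketch of that sharing pattern; Box<T>::create() and the placeholder branch are assumptions, not taken from this example:

// Sketch only: one heap-allocated JumpList shared with a deferred code-gen lambda.
Box<CCallHelpers::JumpList> exceptions = Box<CCallHelpers::JumpList>::create(); // assumed WTF::Box API

params.addLatePath([=] (CCallHelpers& jit) {
    // ... emit a call that can throw, then record the branch taken on exception.
    exceptions->append(jit.jump()); // placeholder for the real exception check
});

// Later, the owner of the Box links every recorded jump in one place:
// exceptions->link(&jit);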
Example 6: emit
void JSCallVarargs::emit(CCallHelpers& jit, State& state, int32_t spillSlotsOffset, int32_t osrExitFromGenericUnwindSpillSlots)
{
// We are passed three pieces of information:
// - The callee.
// - The arguments object, if it's not a forwarding call.
// - The "this" value, if it's a constructor call.
CallVarargsData* data = m_node->callVarargsData();
GPRReg calleeGPR = GPRInfo::argumentGPR0;
GPRReg argumentsGPR = InvalidGPRReg;
GPRReg thisGPR = InvalidGPRReg;
bool forwarding = false;
switch (m_node->op()) {
case CallVarargs:
case TailCallVarargs:
case TailCallVarargsInlinedCaller:
case ConstructVarargs:
argumentsGPR = GPRInfo::argumentGPR1;
thisGPR = GPRInfo::argumentGPR2;
break;
case CallForwardVarargs:
case TailCallForwardVarargs:
case TailCallForwardVarargsInlinedCaller:
case ConstructForwardVarargs:
thisGPR = GPRInfo::argumentGPR1;
forwarding = true;
break;
default:
RELEASE_ASSERT_NOT_REACHED();
break;
}
const unsigned calleeSpillSlot = 0;
const unsigned argumentsSpillSlot = 1;
const unsigned thisSpillSlot = 2;
const unsigned stackPointerSpillSlot = 3;
// Get some scratch registers.
RegisterSet usedRegisters;
usedRegisters.merge(RegisterSet::stackRegisters());
usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
usedRegisters.merge(RegisterSet::calleeSaveRegisters());
usedRegisters.set(calleeGPR);
if (argumentsGPR != InvalidGPRReg)
usedRegisters.set(argumentsGPR);
ASSERT(thisGPR);
usedRegisters.set(thisGPR);
ScratchRegisterAllocator allocator(usedRegisters);
GPRReg scratchGPR1 = allocator.allocateScratchGPR();
GPRReg scratchGPR2 = allocator.allocateScratchGPR();
GPRReg scratchGPR3 = allocator.allocateScratchGPR();
RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
auto computeUsedStack = [&] (GPRReg targetGPR, unsigned extra) {
if (isARM64()) {
// Have to do this the weird way because $sp on ARM64 means zero when used in a subtraction.
jit.move(CCallHelpers::stackPointerRegister, targetGPR);
jit.negPtr(targetGPR);
jit.addPtr(GPRInfo::callFrameRegister, targetGPR);
} else {
jit.move(GPRInfo::callFrameRegister, targetGPR);
jit.subPtr(CCallHelpers::stackPointerRegister, targetGPR);
}
if (extra)
jit.subPtr(CCallHelpers::TrustedImm32(extra), targetGPR);
jit.urshiftPtr(CCallHelpers::Imm32(3), targetGPR);
};
auto callWithExceptionCheck = [&] (void* callee) {
jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
jit.call(GPRInfo::nonPreservedNonArgumentGPR);
m_exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
};
if (isARM64()) {
jit.move(CCallHelpers::stackPointerRegister, scratchGPR1);
jit.storePtr(scratchGPR1, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
} else
jit.storePtr(CCallHelpers::stackPointerRegister, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
unsigned extraStack = sizeof(CallerFrameAndPC) +
WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*));
if (forwarding) {
CCallHelpers::JumpList slowCase;
computeUsedStack(scratchGPR2, 0);
emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, m_node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
CCallHelpers::Jump done = jit.jump();
slowCase.link(&jit);
jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
jit.setupArgumentsExecState();
callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
//......... part of the code is omitted here .........
Example 7: emitDOMJITGetter
//......... part of the code is omitted here .........
allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
allocator.lock(valueRegs);
allocator.lock(scratchGPR);
GPRReg paramBaseGPR = InvalidGPRReg;
GPRReg paramGlobalObjectGPR = InvalidGPRReg;
JSValueRegs paramValueRegs = valueRegs;
GPRReg remainingScratchGPR = InvalidGPRReg;
// valueRegs and baseForGetGPR may be the same. For example, the Baseline JIT passes the same regT0 for both baseGPR and valueRegs.
// In FTL there is no constraint that baseForGetGPR interferes with the result. To keep the Snippet implementation simple,
// Snippet assumes that result registers always early-interfere with input registers, in this case baseForGetGPR.
// So we move baseForGetGPR to another register if baseForGetGPR == valueRegs.
if (baseForGetGPR != valueRegs.payloadGPR()) {
paramBaseGPR = baseForGetGPR;
if (!snippet->requireGlobalObject)
remainingScratchGPR = scratchGPR;
else
paramGlobalObjectGPR = scratchGPR;
} else {
jit.move(valueRegs.payloadGPR(), scratchGPR);
paramBaseGPR = scratchGPR;
if (snippet->requireGlobalObject)
paramGlobalObjectGPR = allocator.allocateScratchGPR();
}
JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();
regs.append(paramValueRegs);
regs.append(paramBaseGPR);
if (snippet->requireGlobalObject) {
ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
regs.append(SnippetParams::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
}
if (snippet->numGPScratchRegisters) {
unsigned i = 0;
if (remainingScratchGPR != InvalidGPRReg) {
gpScratch.append(remainingScratchGPR);
++i;
}
for (; i < snippet->numGPScratchRegisters; ++i)
gpScratch.append(allocator.allocateScratchGPR());
}
for (unsigned i = 0; i < snippet->numFPScratchRegisters; ++i)
fpScratch.append(allocator.allocateScratchFPR());
// Let's store the reused registers to the stack. After that, we can use allocated scratch registers.
ScratchRegisterAllocator::PreservedState preservedState =
allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
if (verbose) {
dataLog("baseGPR = ", baseGPR, "\n");
dataLog("valueRegs = ", valueRegs, "\n");
dataLog("scratchGPR = ", scratchGPR, "\n");
dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
if (paramGlobalObjectGPR != InvalidGPRReg)
dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
dataLog("paramValueRegs = ", paramValueRegs, "\n");
for (unsigned i = 0; i < snippet->numGPScratchRegisters; ++i)
dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
}
if (snippet->requireGlobalObject)
jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);
// We only spill the registers used by the Snippet here. Registers that are not spilled here explicitly
// must be in the used-register set passed by the callers (Baseline, DFG, and FTL) if they need to be kept.
// Some registers can be locked yet not be in the used-register set. For example, the caller could make baseGPR
// the same as valueRegs and not include it in the used registers, since it will be overwritten anyway.
RegisterSet registersToSpillForCCall;
for (auto& value : regs) {
SnippetReg reg = value.reg();
if (reg.isJSValueRegs())
registersToSpillForCCall.set(reg.jsValueRegs());
else if (reg.isGPR())
registersToSpillForCCall.set(reg.gpr());
else
registersToSpillForCCall.set(reg.fpr());
}
for (GPRReg reg : gpScratch)
registersToSpillForCCall.set(reg);
for (FPRReg reg : fpScratch)
registersToSpillForCCall.set(reg);
registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());
AccessCaseSnippetParams params(state.m_vm, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
snippet->generator()->run(jit, params);
allocator.restoreReusedRegistersByPopping(jit, preservedState);
state.succeed();
CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
if (!exceptions.empty()) {
exceptions.link(&jit);
allocator.restoreReusedRegistersByPopping(jit, preservedState);
state.emitExplicitExceptionHandler();
}
}
Example 8: virtualForThunkGenerator
static MacroAssemblerCodeRef virtualForThunkGenerator(
VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
// The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
// The return address is on the stack, or in the link register. We will hence
// jump to the callee, or save the return address to the call frame while we
// make a C++ function call to the appropriate JIT operation.
CCallHelpers jit(vm);
CCallHelpers::JumpList slowCase;
// FIXME: we should have a story for eliminating these checks. In many cases,
// the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT2);
slowCase.append(
jit.branchTest64(
CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT2));
#else
slowCase.append(
jit.branch32(
CCallHelpers::NotEqual, GPRInfo::regT1,
CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT2, GPRInfo::regT1);
slowCase.append(
jit.branchPtr(
CCallHelpers::NotEqual,
CCallHelpers::Address(GPRInfo::regT2, Structure::classInfoOffset()),
CCallHelpers::TrustedImmPtr(JSFunction::info())));
// Now we know we have a JSFunction.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
GPRInfo::regT2);
jit.loadPtr(
CCallHelpers::Address(
GPRInfo::regT2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
GPRInfo::regT2);
slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2));
// Now we know that we have a CodeBlock, and we're committed to making a fast
// call.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
GPRInfo::regT1);
#if USE(JSVALUE64)
jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
JSStack::ScopeChain);
#endif
// Make a tail call. This will return back to JIT code.
emitPointerValidation(jit, GPRInfo::regT2);
jit.jump(GPRInfo::regT2);
slowCase.link(&jit);
// Here we don't know anything, so revert to the full slow path.
slowPathFor(jit, vm, operationVirtualFor(kind, registers));
LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
Example 9: virtualForThunkGenerator
static MacroAssemblerCodeRef virtualForThunkGenerator(
JSGlobalData* globalData, CodeSpecializationKind kind)
{
// The return address is on the stack, or in the link register. We will hence
// jump to the callee, or save the return address to the call frame while we
// make a C++ function call to the appropriate DFG operation.
CCallHelpers jit(globalData);
CCallHelpers::JumpList slowCase;
// FIXME: we should have a story for eliminating these checks. In many cases,
// the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
slowCase.append(
jit.branchTestPtr(
CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
slowCase.append(
jit.branch32(
CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
slowCase.append(
jit.branchPtr(
CCallHelpers::NotEqual,
CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
CCallHelpers::TrustedImmPtr(&JSFunction::s_info)));
// Now we know we have a JSFunction.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
GPRInfo::nonArgGPR2);
slowCase.append(
jit.branch32(
CCallHelpers::LessThan,
CCallHelpers::Address(
GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
CCallHelpers::TrustedImm32(0)));
// Now we know that we have a CodeBlock, and we're committed to making a fast
// call.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
jit.storePtr(
GPRInfo::nonArgGPR1,
CCallHelpers::Address(
GPRInfo::callFrameRegister,
static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain));
#else
jit.storePtr(
GPRInfo::nonArgGPR1,
CCallHelpers::Address(
GPRInfo::callFrameRegister,
static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
jit.store32(
CCallHelpers::TrustedImm32(JSValue::CellTag),
CCallHelpers::Address(
GPRInfo::callFrameRegister,
static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif
jit.loadPtr(
CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
GPRInfo::regT0);
// Make a tail call. This will return back to DFG code.
emitPointerValidation(jit, GPRInfo::regT0);
jit.jump(GPRInfo::regT0);
slowCase.link(&jit);
// Here we don't know anything, so revert to the full slow path.
slowPathFor(jit, globalData, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);
LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
("DFG virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}
Example 10: fixFunctionBasedOnStackMaps
//......... part of the code is omitted here .........
dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");
auto iter = recordMap.find(exit.m_stackmapID);
if (iter == recordMap.end()) {
// It was optimized out.
continue;
}
info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
for (unsigned j = exit.m_values.size(); j--;)
exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
materialization->accountForLocalsOffset(localsOffset);
if (verboseCompilationEnabled()) {
DumpContext context;
dataLog(" Exit values: ", inContext(exit.m_values, &context), "\n");
if (!exit.m_materializations.isEmpty()) {
dataLog(" Materializations: \n");
for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
dataLog(" Materialize(", pointerDump(materialization), ")\n");
}
}
}
state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
}
if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
CCallHelpers slowPathJIT(&vm, codeBlock);
CCallHelpers::JumpList exceptionTarget;
for (unsigned i = state.getByIds.size(); i--;) {
GetByIdDescriptor& getById = state.getByIds[i];
if (verboseCompilationEnabled())
dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
auto iter = recordMap.find(getById.stackmapID());
if (iter == recordMap.end()) {
// It was optimized out.
continue;
}
CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[getById.callSiteIndex().bits()];
for (unsigned i = 0; i < iter->value.size(); ++i) {
StackMaps::Record& record = iter->value[i];
RegisterSet usedRegisters = usedRegistersFor(record);
GPRReg result = record.locations[0].directGPR();
GPRReg base = record.locations[1].directGPR();
JITGetByIdGenerator gen(
codeBlock, codeOrigin, getById.callSiteIndex(), usedRegisters, JSValueRegs(base),
JSValueRegs(result), NeedToSpill);
MacroAssembler::Label begin = slowPathJIT.label();
MacroAssembler::Call call = callOperation(
state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());
//......... the rest of this example is omitted .........
Example 11: link
//......... part of the code is omitted here .........
out.reset();
if (node->origin.semantic.isSet())
lastNode = node;
}
}
dumpContext.dump(out, prefix);
compilation->addDescription(Profiler::OriginStack(), out.toCString());
out.reset();
out.print(" Disassembly:\n");
#if FTL_USES_B3
out.print(" <not implemented yet>\n");
#else
for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
if (state.codeSectionNames[i] != SECTION_NAME("text"))
continue;
ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
disassemble(
MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
" ", out, LLVMSubset);
}
#endif
compilation->addDescription(Profiler::OriginStack(), out.toCString());
out.reset();
state.jitCode->common.compilation = compilation;
}
switch (graph.m_plan.mode) {
case FTLMode: {
CCallHelpers::JumpList mainPathJumps;
jit.load32(
frame.withOffset(sizeof(Register) * JSStack::ArgumentCount),
GPRInfo::regT1);
mainPathJumps.append(jit.branch32(
CCallHelpers::AboveOrEqual, GPRInfo::regT1,
CCallHelpers::TrustedImm32(codeBlock->numParameters())));
jit.emitFunctionPrologue();
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
jit.store32(
CCallHelpers::TrustedImm32(CallSiteIndex(0).bits()),
CCallHelpers::tagFor(JSStack::ArgumentCount));
jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
CCallHelpers::Call callArityCheck = jit.call();
#if !ASSERT_DISABLED
// FIXME: need to make this call register with exception handling somehow. This is
// part of a bigger problem: FTL should be able to handle exceptions.
// https://bugs.webkit.org/show_bug.cgi?id=113622
// Until then, use a JIT ASSERT.
jit.load64(vm.addressOfException(), GPRInfo::regT1);
jit.jitAssertIsNull(GPRInfo::regT1);
#endif
jit.move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
jit.emitFunctionEpilogue();
mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::argumentGPR0));
jit.emitFunctionPrologue();
CCallHelpers::Call callArityFixup = jit.call();
jit.emitFunctionEpilogue();
mainPathJumps.append(jit.jump());
linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail);
if (linkBuffer->didFailToAllocate()) {
//......... the rest of this example is omitted .........
Example 12: virtualThunkFor
// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
// The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
// The return address is on the stack, or in the link register. We will hence
// jump to the callee, or save the return address to the call frame while we
// make a C++ function call to the appropriate JIT operation.
CCallHelpers jit(vm);
CCallHelpers::JumpList slowCase;
// This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
// slow path execution for the profiler.
jit.add32(
CCallHelpers::TrustedImm32(1),
CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
// FIXME: we should have a story for eliminating these checks. In many cases,
// the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
slowCase.append(
jit.branchTest64(
CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
slowCase.append(
jit.branch32(
CCallHelpers::NotEqual, GPRInfo::regT1,
CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
slowCase.append(
jit.branchPtr(
CCallHelpers::NotEqual,
CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
CCallHelpers::TrustedImmPtr(JSFunction::info())));
// Now we know we have a JSFunction.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
GPRInfo::regT4);
jit.loadPtr(
CCallHelpers::Address(
GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
callLinkInfo.specializationKind())),
GPRInfo::regT4);
slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
// Now we know that we have a CodeBlock, and we're committed to making a fast
// call.
// Make a tail call. This will return back to JIT code.
emitPointerValidation(jit, GPRInfo::regT4);
if (callLinkInfo.isTailCall()) {
jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
jit.prepareForTailCallSlow(GPRInfo::regT4);
}
jit.jump(GPRInfo::regT4);
slowCase.link(&jit);
// Here we don't know anything, so revert to the full slow path.
slowPathFor(jit, vm, operationVirtualCall);
LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
("Virtual %s slow path thunk",
callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}