This article collects typical usage examples of the C++ method CCallHelpers::JumpList::link. If you have been wondering what JumpList::link does, how to call it, or what it looks like in real code, the curated examples here should help. You can also explore further usage examples of the enclosing class, CCallHelpers::JumpList.
Below are 7 code examples of JumpList::link, ranked by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
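Before diving into the examples, here is the pattern they all share: a JumpList collects forward branches whose destination is not yet known, and link(&jit) later binds every collected jump to the current emission point. The following is a minimal sketch only, assuming JSC's CCallHelpers/MacroAssembler API; emitSlowPathBody is a hypothetical placeholder, not a real JSC function.
// Minimal sketch, assuming JSC's CCallHelpers API; emitSlowPathBody is hypothetical.
static void emitWithSlowPath(CCallHelpers& jit, GPRReg valueGPR, GPRReg structureGPR)
{
    CCallHelpers::JumpList slowCase;

    // Collect every branch that should divert to the (not yet emitted) slow path.
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, valueGPR));
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, structureGPR));

    // ... fast path ...
    CCallHelpers::Jump done = jit.jump();   // fast path skips over the slow path

    // JumpList::link binds every collected jump to the current emission point,
    // so one slow path serves all of the branches appended above.
    slowCase.link(&jit);
    emitSlowPathBody(jit);                  // hypothetical slow-path emitter

    done.link(&jit);                        // fast and slow paths converge here
}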
Example 1: dispatch
static void dispatch(CCallHelpers& jit, FTL::State* state, const B3::StackmapGenerationParams& params, DFG::Node* node, Box<CCallHelpers::JumpList> exceptions, CCallHelpers::JumpList from, OperationType operation, ResultType result, Arguments arguments, std::index_sequence<ArgumentsIndex...>)
{
    CCallHelpers::Label done = jit.label();
    params.addLatePath([=] (CCallHelpers& jit) {
        AllowMacroScratchRegisterUsage allowScratch(jit);
        from.link(&jit);
        callOperation(
            *state, params.unavailableRegisters(), jit, node->origin.semantic,
            exceptions.get(), operation, extractResult(result), std::get<ArgumentsIndex>(arguments)...);
        jit.jump().linkTo(done, &jit);
    });
}
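Example 1 uses two different linking calls that are easy to confuse: JumpList::link(&jit) binds every jump collected in the list to the point where link is called, while Jump::linkTo(label, &jit) binds a single jump to a Label that was emitted earlier. A stripped-down sketch of the distinction, assuming the same CCallHelpers API (the registers and control flow are purely illustrative):
// Sketch only; illustrates JumpList::link() vs. Jump::linkTo().
CCallHelpers::Label retry = jit.label();              // an already-emitted Label
CCallHelpers::JumpList failures;
failures.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0));
CCallHelpers::Jump done = jit.jump();                 // success skips the failure handler

failures.link(&jit);                                  // JumpList::link: all collected jumps land here
// ... failure handling ...
jit.jump().linkTo(retry, &jit);                       // Jump::linkTo: one jump to an existing Label

done.link(&jit);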
Example 2: emit
void JSCallVarargs::emit(CCallHelpers& jit, State& state, int32_t spillSlotsOffset, int32_t osrExitFromGenericUnwindSpillSlots)
{
    // We are passed three pieces of information:
    // - The callee.
    // - The arguments object, if it's not a forwarding call.
    // - The "this" value, if it's a constructor call.
    CallVarargsData* data = m_node->callVarargsData();
    GPRReg calleeGPR = GPRInfo::argumentGPR0;
    GPRReg argumentsGPR = InvalidGPRReg;
    GPRReg thisGPR = InvalidGPRReg;
    bool forwarding = false;
    switch (m_node->op()) {
    case CallVarargs:
    case TailCallVarargs:
    case TailCallVarargsInlinedCaller:
    case ConstructVarargs:
        argumentsGPR = GPRInfo::argumentGPR1;
        thisGPR = GPRInfo::argumentGPR2;
        break;
    case CallForwardVarargs:
    case TailCallForwardVarargs:
    case TailCallForwardVarargsInlinedCaller:
    case ConstructForwardVarargs:
        thisGPR = GPRInfo::argumentGPR1;
        forwarding = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    const unsigned calleeSpillSlot = 0;
    const unsigned argumentsSpillSlot = 1;
    const unsigned thisSpillSlot = 2;
    const unsigned stackPointerSpillSlot = 3;
    // Get some scratch registers.
    RegisterSet usedRegisters;
    usedRegisters.merge(RegisterSet::stackRegisters());
    usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
    usedRegisters.merge(RegisterSet::calleeSaveRegisters());
    usedRegisters.set(calleeGPR);
    if (argumentsGPR != InvalidGPRReg)
        usedRegisters.set(argumentsGPR);
    ASSERT(thisGPR);
    usedRegisters.set(thisGPR);
    ScratchRegisterAllocator allocator(usedRegisters);
    GPRReg scratchGPR1 = allocator.allocateScratchGPR();
    GPRReg scratchGPR2 = allocator.allocateScratchGPR();
    GPRReg scratchGPR3 = allocator.allocateScratchGPR();
    RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
    auto computeUsedStack = [&] (GPRReg targetGPR, unsigned extra) {
        if (isARM64()) {
            // Have to do this the weird way because $sp on ARM64 means zero when used in a subtraction.
            jit.move(CCallHelpers::stackPointerRegister, targetGPR);
            jit.negPtr(targetGPR);
            jit.addPtr(GPRInfo::callFrameRegister, targetGPR);
        } else {
            jit.move(GPRInfo::callFrameRegister, targetGPR);
            jit.subPtr(CCallHelpers::stackPointerRegister, targetGPR);
        }
        if (extra)
            jit.subPtr(CCallHelpers::TrustedImm32(extra), targetGPR);
        jit.urshiftPtr(CCallHelpers::Imm32(3), targetGPR);
    };
    auto callWithExceptionCheck = [&] (void* callee) {
        jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
        jit.call(GPRInfo::nonPreservedNonArgumentGPR);
        m_exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
    };
    if (isARM64()) {
        jit.move(CCallHelpers::stackPointerRegister, scratchGPR1);
        jit.storePtr(scratchGPR1, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
    } else
        jit.storePtr(CCallHelpers::stackPointerRegister, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
    unsigned extraStack = sizeof(CallerFrameAndPC) +
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*));
    if (forwarding) {
        CCallHelpers::JumpList slowCase;
        computeUsedStack(scratchGPR2, 0);
        emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, m_node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
        CCallHelpers::Jump done = jit.jump();
        slowCase.link(&jit);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsExecState();
        callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
        jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
//......... part of the code is omitted here .........
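Example 2 also shows the other half of the idiom: the caller-owned JumpList slowCase is passed by reference to a helper (emitSetupVarargsFrameFastCase), which appends its failure branches to it, and the caller then links them all at a single place. A rough sketch of that shape, assuming the CCallHelpers API; emitHelper and emitCaller are hypothetical names:
// Sketch only; a helper appends to a caller-owned JumpList, the caller links it.
static void emitHelper(CCallHelpers& jit, GPRReg scratchGPR, CCallHelpers::JumpList& slowCase)
{
    // The helper does not know where the slow path will live; it only appends to the list.
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, scratchGPR));
}

static void emitCaller(CCallHelpers& jit, GPRReg scratchGPR)
{
    CCallHelpers::JumpList slowCase;
    emitHelper(jit, scratchGPR, slowCase);   // failure branches accumulate in slowCase
    CCallHelpers::Jump done = jit.jump();
    slowCase.link(&jit);                     // one link site for every branch the helper added
    // ... slow path ...
    done.link(&jit);
}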
Example 3: emitDOMJITGetter
//......... part of the code is omitted here .........
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
    allocator.lock(valueRegs);
    allocator.lock(scratchGPR);
    GPRReg paramBaseGPR = InvalidGPRReg;
    GPRReg paramGlobalObjectGPR = InvalidGPRReg;
    JSValueRegs paramValueRegs = valueRegs;
    GPRReg remainingScratchGPR = InvalidGPRReg;
    // valueRegs and baseForGetGPR may be the same. For example, in Baseline JIT, we pass the same regT0 for baseGPR and valueRegs.
    // In FTL, there is no constraint that the baseForGetGPR interferes with the result. To make implementation simple in
    // Snippet, Snippet assumes that result registers always early interfere with input registers, in this case,
    // baseForGetGPR. So we move baseForGetGPR to the other register if baseForGetGPR == valueRegs.
    if (baseForGetGPR != valueRegs.payloadGPR()) {
        paramBaseGPR = baseForGetGPR;
        if (!snippet->requireGlobalObject)
            remainingScratchGPR = scratchGPR;
        else
            paramGlobalObjectGPR = scratchGPR;
    } else {
        jit.move(valueRegs.payloadGPR(), scratchGPR);
        paramBaseGPR = scratchGPR;
        if (snippet->requireGlobalObject)
            paramGlobalObjectGPR = allocator.allocateScratchGPR();
    }
    JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();
    regs.append(paramValueRegs);
    regs.append(paramBaseGPR);
    if (snippet->requireGlobalObject) {
        ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
        regs.append(SnippetParams::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
    }
    if (snippet->numGPScratchRegisters) {
        unsigned i = 0;
        if (remainingScratchGPR != InvalidGPRReg) {
            gpScratch.append(remainingScratchGPR);
            ++i;
        }
        for (; i < snippet->numGPScratchRegisters; ++i)
            gpScratch.append(allocator.allocateScratchGPR());
    }
    for (unsigned i = 0; i < snippet->numFPScratchRegisters; ++i)
        fpScratch.append(allocator.allocateScratchFPR());
    // Let's store the reused registers to the stack. After that, we can use allocated scratch registers.
    ScratchRegisterAllocator::PreservedState preservedState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
    if (verbose) {
        dataLog("baseGPR = ", baseGPR, "\n");
        dataLog("valueRegs = ", valueRegs, "\n");
        dataLog("scratchGPR = ", scratchGPR, "\n");
        dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
        if (paramGlobalObjectGPR != InvalidGPRReg)
            dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
        dataLog("paramValueRegs = ", paramValueRegs, "\n");
        for (unsigned i = 0; i < snippet->numGPScratchRegisters; ++i)
            dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
    }
    if (snippet->requireGlobalObject)
        jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);
    // We just spill the registers used in Snippet here. For not spilled registers here explicitly,
    // they must be in the used register set passed by the callers (Baseline, DFG, and FTL) if they need to be kept.
    // Some registers can be locked, but not in the used register set. For example, the caller could make baseGPR
    // same to valueRegs, and not include it in the used registers since it will be changed.
    RegisterSet registersToSpillForCCall;
    for (auto& value : regs) {
        SnippetReg reg = value.reg();
        if (reg.isJSValueRegs())
            registersToSpillForCCall.set(reg.jsValueRegs());
        else if (reg.isGPR())
            registersToSpillForCCall.set(reg.gpr());
        else
            registersToSpillForCCall.set(reg.fpr());
    }
    for (GPRReg reg : gpScratch)
        registersToSpillForCCall.set(reg);
    for (FPRReg reg : fpScratch)
        registersToSpillForCCall.set(reg);
    registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());
    AccessCaseSnippetParams params(state.m_vm, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
    snippet->generator()->run(jit, params);
    allocator.restoreReusedRegistersByPopping(jit, preservedState);
    state.succeed();
    CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
    if (!exceptions.empty()) {
        exceptions.link(&jit);
        allocator.restoreReusedRegistersByPopping(jit, preservedState);
        state.emitExplicitExceptionHandler();
    }
}
Example 4: virtualForThunkGenerator
static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.
    CCallHelpers jit(vm);
    CCallHelpers::JumpList slowCase;
    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT2);
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT2));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT2, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));
    // Now we know we have a JSFunction.
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT2);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT2);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2));
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
        GPRInfo::regT1);
#if USE(JSVALUE64)
    jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
    jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
    jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
        JSStack::ScopeChain);
#endif
    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
    slowCase.link(&jit);
    // Here we don't know anything, so revert to the full slow path.
    slowPathFor(jit, vm, operationVirtualFor(kind, registers));
    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
Example 5: virtualForThunkGenerator
static MacroAssemblerCodeRef virtualForThunkGenerator(
    JSGlobalData* globalData, CodeSpecializationKind kind)
{
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate DFG operation.
    CCallHelpers jit(globalData);
    CCallHelpers::JumpList slowCase;
    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTestPtr(
            CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(&JSFunction::s_info)));
    // Now we know we have a JSFunction.
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
        GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branch32(
            CCallHelpers::LessThan,
            CCallHelpers::Address(
                GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
            CCallHelpers::TrustedImm32(0)));
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
        GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain));
#else
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    jit.store32(
        CCallHelpers::TrustedImm32(JSValue::CellTag),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT0);
    // Make a tail call. This will return back to DFG code.
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.jump(GPRInfo::regT0);
    slowCase.link(&jit);
    // Here we don't know anything, so revert to the full slow path.
    slowPathFor(jit, globalData, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);
    LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}
Example 6: fixFunctionBasedOnStackMaps
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }
    MacroAssembler::Label stackOverflowException;
    {
        CCallHelpers checkJIT(&vm, codeBlock);
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();
        stackOverflowException = checkJIT.label();
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }
    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");
            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);
            if (verboseCompilationEnabled()) {
                DumpContext context;
//......... part of the code is omitted here .........
Example 7: virtualThunkFor
// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.
    CCallHelpers jit(vm);
    CCallHelpers::JumpList slowCase;
    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));
    // Now we know we have a JSFunction.
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4);
    slowCase.link(&jit);
    // Here we don't know anything, so revert to the full slow path.
    slowPathFor(jit, vm, operationVirtualCall);
    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s slow path thunk",
            callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}