This article collects typical usage examples of the C++ InlineCallFrame class. If you have been wondering what InlineCallFrame is for and how it is used, the curated class examples below may help. Ten code examples of the InlineCallFrame class are shown (all drawn from WebKit's JavaScriptCore engine), ordered by popularity.
Example 1: emitCodeToGetArgumentsArrayLength
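This helper emits DFG IR that computes the length of an arguments array. When the arguments object belongs to an inlined call frame that is not varargs, the argument count is known statically and the length folds to a constant; otherwise the emitted nodes load the argument count at runtime and subtract one for the this value.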
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == PhantomDirectArguments
        || arguments->op() == PhantomClonedArguments);

    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

    // Fast path: a non-varargs inlined frame has a statically known argument
    // count, so the length is a compile-time constant (minus one for `this`).
    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(inlineCallFrame->arguments.size() - 1));
    }

    // Otherwise load the argument count at runtime: from the machine frame for
    // the outermost frame, or from the inlined frame's argument-count slot.
    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32, GetArgumentCount, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount);
        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

    return insertionSet.insertNode(
        nodeIndex, SpecInt32, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1), Int32Use));
}
Example 2: StackVisitor::readInlinedFrame
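This stack walker reads an inlined frame into its cursor: the argument count comes either from the frame's live argument-count register or from the InlineCallFrame's static argument list, and the code block and bytecode offset are taken from the baseline version of the inlined function.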
void StackVisitor::readInlinedFrame(CallFrame* callFrame, CodeOrigin* codeOrigin)
{
    ASSERT(codeOrigin);

    int frameOffset = inlinedFrameOffset(codeOrigin);
    bool isInlined = !!frameOffset;
    if (isInlined) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;

        m_frame.m_callFrame = callFrame;
        m_frame.m_inlineCallFrame = inlineCallFrame;
        if (inlineCallFrame->argumentCountRegister.isValid())
            m_frame.m_argumentCountIncludingThis = callFrame->r(inlineCallFrame->argumentCountRegister.offset()).unboxedInt32();
        else
            m_frame.m_argumentCountIncludingThis = inlineCallFrame->arguments.size();
        m_frame.m_codeBlock = inlineCallFrame->baselineCodeBlock();
        m_frame.m_bytecodeOffset = codeOrigin->bytecodeIndex;

        JSFunction* callee = inlineCallFrame->calleeForCallFrame(callFrame);
        m_frame.m_callee = callee;
        ASSERT(m_frame.callee());

        // The callerFrame just needs to be non-null to indicate that we
        // haven't reached the last frame yet. Setting it to the root
        // frame (i.e. the callFrame that this inlined frame is called from)
        // would work just fine.
        m_frame.m_callerFrame = callFrame;
        return;
    }

    readNonInlinedFrame(callFrame, codeOrigin);
}
Example 3: emitCodeToGetArgumentsArrayLength
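A later revision of the helper from Example 1 that also handles rest parameters (CreateRest and PhantomCreateRest). A configurable number of leading arguments is skipped, and an extra ArithMax node clamps the computed length at zero because the subtraction can go negative.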
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == CreateRest
        || arguments->op() == PhantomDirectArguments || arguments->op() == PhantomClonedArguments
        || arguments->op() == PhantomCreateRest);

    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

    unsigned numberOfArgumentsToSkip = 0;
    if (arguments->op() == CreateRest || arguments->op() == PhantomCreateRest)
        numberOfArgumentsToSkip = arguments->numberOfArgumentsToSkip();

    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        unsigned argumentsSize = inlineCallFrame->arguments.size() - 1;
        if (argumentsSize >= numberOfArgumentsToSkip)
            argumentsSize -= numberOfArgumentsToSkip;
        else
            argumentsSize = 0;
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(argumentsSize));
    }

    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32Only, GetArgumentCountIncludingThis, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

    Node* result = insertionSet.insertNode(
        nodeIndex, SpecInt32Only, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1 + numberOfArgumentsToSkip), Int32Use));

    if (numberOfArgumentsToSkip) {
        // The above subtraction may produce a negative number if this number is non-zero. We correct that here.
        result = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, ArithMax, origin,
            Edge(result, Int32Use),
            insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(0), Int32Use));
        result->setResult(NodeResultInt32);
    }

    return result;
}
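For intuition, the value the emitted node chain computes can be written as a plain scalar function (a minimal sketch with a hypothetical name, not JavaScriptCore code):

#include <algorithm>

// Hypothetical scalar equivalent of the IR emitted above: subtract `this`
// plus the skipped arguments, then clamp at zero as the ArithMax node does.
static int restArrayLength(int argumentCountIncludingThis, unsigned numberOfArgumentsToSkip)
{
    int length = argumentCountIncludingThis - 1 - static_cast<int>(numberOfArgumentsToSkip);
    return std::max(length, 0);
}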
Example 4: StackVisitor::gotoNextFrame
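This advances a stack walker past an inlined frame. If skipping dead frames yields no caller origin, every remaining inlined frame is unwound in a loop before the walker drops down to the physical caller frame.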
void StackVisitor::gotoNextFrame()
{
#if ENABLE(DFG_JIT)
    if (m_frame.isInlinedFrame()) {
        InlineCallFrame* inlineCallFrame = m_frame.inlineCallFrame();
        CodeOrigin* callerCodeOrigin = inlineCallFrame->getCallerSkippingDeadFrames();
        if (!callerCodeOrigin) {
            // No live caller within this machine frame: unwind the remaining
            // inlined frames, then continue at the physical caller frame.
            while (inlineCallFrame) {
                readInlinedFrame(m_frame.callFrame(), &inlineCallFrame->directCaller);
                inlineCallFrame = m_frame.inlineCallFrame();
            }
            m_frame.m_VMEntryFrame = m_frame.m_CallerVMEntryFrame;
            readFrame(m_frame.callerFrame());
        } else
            readInlinedFrame(m_frame.callFrame(), callerCodeOrigin);
        return;
    }
#endif // ENABLE(DFG_JIT)

    m_frame.m_VMEntryFrame = m_frame.m_CallerVMEntryFrame;
    readFrame(m_frame.callerFrame());
}
Example 5: StackIterator::readInlinedFrame
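An older StackIterator variant of readInlinedFrame. The callee and scope come straight from the InlineCallFrame when the callee is known statically; for closure calls they are read out of the materialized frame slots instead.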
void StackIterator::readInlinedFrame(CallFrame* callFrame, CodeOrigin* codeOrigin)
{
    ASSERT(codeOrigin);
    ASSERT(!callFrame->hasHostCallFrameFlag());

    unsigned frameOffset = inlinedFrameOffset(codeOrigin);
    bool isInlined = !!frameOffset;
    if (isInlined) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;

        m_frame.m_callFrame = callFrame;
        m_frame.m_inlineCallFrame = inlineCallFrame;
        m_frame.m_argumentCountIncludingThis = inlineCallFrame->arguments.size();
        m_frame.m_codeBlock = inlineCallFrame->baselineCodeBlock();
        m_frame.m_bytecodeOffset = codeOrigin->bytecodeIndex;

        JSFunction* callee = inlineCallFrame->callee.get();
        if (callee) {
            m_frame.m_scope = callee->scope();
            m_frame.m_callee = callee;
        } else {
            CallFrame* inlinedFrame = callFrame + frameOffset;
            m_frame.m_scope = inlinedFrame->scope();
            m_frame.m_callee = inlinedFrame->callee();
        }
        ASSERT(m_frame.scope());
        ASSERT(m_frame.callee());

        // The callerFrame just needs to be non-null to indicate that we
        // haven't reached the last frame yet. Setting it to the root
        // frame (i.e. the callFrame that this inlined frame is called from)
        // would work just fine.
        m_frame.m_callerFrame = callFrame;
        return;
    }

    readNonInlinedFrame(callFrame, codeOrigin);
}
Example 6: LocalOSRAvailabilityCalculator::executeNode
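The local OSR availability calculator records where each value can be recovered from at every point in the program: stack operations update flush state, hints update node availability, and phantom arguments objects record the callee, argument count, and individual arguments of their inline call frame as promoted heap locations.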
void LocalOSRAvailabilityCalculator::executeNode(Node* node)
{
    switch (node->op()) {
    case PutStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local).setFlush(data->flushedAt());
        break;
    }

    case KillStack: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush));
        break;
    }

    case GetStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local) = Availability(node, data->flushedAt());
        break;
    }

    case MovHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node());
        break;
    }

    case ZombieHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
        break;
    }

    case LoadVarargs:
    case ForwardVarargs: {
        LoadVarargsData* data = node->loadVarargsData();
        m_availability.m_locals.operand(data->count) =
            Availability(FlushedAt(FlushedInt32, data->machineCount));
        for (unsigned i = data->limit; i--;) {
            m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) =
                Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i)));
        }
        break;
    }

    case PhantomCreateRest:
    case PhantomDirectArguments:
    case PhantomClonedArguments: {
        InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
        if (!inlineCallFrame) {
            // We don't need to record anything about how the arguments are to be recovered. It's just a
            // given that we can read them from the stack.
            break;
        }

        unsigned numberOfArgumentsToSkip = 0;
        if (node->op() == PhantomCreateRest)
            numberOfArgumentsToSkip = node->numberOfArgumentsToSkip();

        if (inlineCallFrame->isVarargs()) {
            // Record how to read each argument and the argument count.
            Availability argumentCount =
                m_availability.m_locals.operand(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentCountPLoc, node), argumentCount);
        }

        if (inlineCallFrame->isClosureCall) {
            Availability callee = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrameSlot::callee);
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentsCalleePLoc, node), callee);
        }

        for (unsigned i = numberOfArgumentsToSkip; i < inlineCallFrame->arguments.size() - 1; ++i) {
            Availability argument = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrame::argumentOffset(i));
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument);
        }
        break;
    }

    case PutHint: {
        m_availability.m_heap.set(
            PromotedHeapLocation(node->child1().node(), node->promotedLocationDescriptor()),
            Availability(node->child2().node()));
        break;
    }

    case PhantomSpread:
        m_availability.m_heap.set(PromotedHeapLocation(SpreadPLoc, node), Availability(node->child1().node()));
        break;

    case PhantomNewArrayWithSpread:
        for (unsigned i = 0; i < node->numChildren(); i++) {
            Node* child = m_graph.varArgChild(node, i).node();
            m_availability.m_heap.set(PromotedHeapLocation(NewArrayWithSpreadArgumentPLoc, node, i), Availability(child));
        }
        break;

    default:
        break;
    }

//......... remainder of this code omitted .........
Example 7: reifyInlinedCallFrames
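On OSR exit, this routine rebuilds a machine call frame for every inlined frame on the exit's code origin chain. The return PC is recovered from the caller's call link info, or from getter/setter return thunks for accessor calls, and the caller frame pointer, code block, argument count, and callee are stored back into the reified frame, with separate paths for the 64-bit and 32-bit value representations.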
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        void* jumpTarget = nullptr;
        void* trueReturnPC = nullptr;

        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;

        switch (inlineCallFrame->kind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo =
                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);

            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo =
                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            switch (inlineCallFrame->kind) {
            case InlineCallFrame::GetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
                break;
            case InlineCallFrame::SetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
                stubInfo->patch.deltaCallToDone).executableAddress();
            break;
        } }

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        if (trueReturnPC)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
Example 8: OSRExitCompiler::emitRestoreArguments
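This restores arguments objects that the DFG optimized away (the DirectArgumentsThatWereNotCreated and ClonedArgumentsThatWereNotCreated recovery techniques). Each phantom node is materialized once through a runtime call; subsequent operands that refer to the same node simply copy the already-allocated value.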
void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
{
    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;

        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
            m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
            m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
            continue;
        }

        InlineCallFrame* inlineCallFrame =
            m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;

        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
                GPRInfo::regT0);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
                GPRInfo::regT0);
        }

        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
            m_jit.load32(
                AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
                GPRInfo::regT1);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
                GPRInfo::regT1);
        }

        m_jit.setupArgumentsWithExecState(
            AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);

        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        case ClonedArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        m_jit.call(GPRInfo::nonArgGPR0);
        m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));

        alreadyAllocatedArguments.add(id, operand);
    }
}
Example 9: run
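A graph pass that compacts the used locals into a dense numbering: it scans the IR for local accesses, additionally marks the slots needed by inline call frames (each argument slot and, for varargs frames, the argument-count slot), and then assigns consecutive machine locals to the used slots only.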
bool run()
{
    // This enumerates the locals that we actually care about and packs them. So for example
    // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
    // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
    // Flush, PhantomLocal).

    BitVector usedLocals;

    // Collect those variables that are used from IR.
    bool hasNodesThatNeedFixup = false;
    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);

            switch (node->op()) {
            case GetLocal:
            case SetLocal:
            case Flush:
            case PhantomLocal: {
                VariableAccessData* variable = node->variableAccessData();
                if (variable->local().isArgument())
                    break;
                usedLocals.set(variable->local().toLocal());
                break;
            }

            case GetLocalUnlinked: {
                VirtualRegister operand = node->unlinkedLocal();
                if (operand.isArgument())
                    break;
                usedLocals.set(operand.toLocal());
                hasNodesThatNeedFixup = true;
                break;
            }

            case LoadVarargs:
            case ForwardVarargs: {
                LoadVarargsData* data = node->loadVarargsData();
                if (data->count.isLocal())
                    usedLocals.set(data->count.toLocal());
                if (data->start.isLocal()) {
                    // This part really relies on the contiguity of stack layout
                    // assignments.
                    ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
                    for (unsigned i = data->limit; i--;)
                        usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
                } // the else case shouldn't happen.
                hasNodesThatNeedFixup = true;
                break;
            }

            case PutStack:
            case GetStack: {
                StackAccessData* stack = node->stackAccessData();
                if (stack->local.isArgument())
                    break;
                usedLocals.set(stack->local.toLocal());
                break;
            }

            default:
                break;
            }
        }
    }

    for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
        InlineCallFrame* inlineCallFrame = *iter;

        if (inlineCallFrame->isVarargs()) {
            usedLocals.set(VirtualRegister(
                JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            usedLocals.set(VirtualRegister(
                virtualRegisterForArgument(argument).offset() +
                inlineCallFrame->stackOffset).toLocal());
        }
    }

    Vector<unsigned> allocation(usedLocals.size());
    m_graph.m_nextMachineLocal = 0;
    for (unsigned i = 0; i < usedLocals.size(); ++i) {
        if (!usedLocals.get(i)) {
            allocation[i] = UINT_MAX;
            continue;
        }

        allocation[i] = m_graph.m_nextMachineLocal++;
    }

    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;

//......... remainder of this code omitted .........
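The remapping described in the pass's opening comment can be sketched in isolation (a hypothetical standalone helper mirroring the allocation loop above, not JSC code):

#include <climits>
#include <vector>

// Maps used slot indices to a dense numbering; unused slots get UINT_MAX.
// For used slots {1, 3, 4, 5, 7} this yields 1->0, 3->1, 4->2, 5->3, 7->4.
std::vector<unsigned> packLocals(const std::vector<bool>& usedLocals)
{
    std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
    unsigned nextMachineLocal = 0;
    for (size_t i = 0; i < usedLocals.size(); ++i) {
        if (usedLocals[i])
            allocation[i] = nextMachineLocal++;
    }
    return allocation;
}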
Example 10: reifyInlinedCallFrames
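An earlier version of reifyInlinedCallFrames, from before the getter/setter support shown in Example 7. It additionally restores each inlined frame's scope chain slot and, when the baseline code block uses an arguments object, patches the captured arguments' registers pointer.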
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
        CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex);

        void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress();

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

#if USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part

        if (baselineCodeBlock->usesArguments()) {
            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
            noArguments.link(&jit);
        }
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}