This page collects typical usage examples of the C++ Operands class. If you have been wondering what exactly the Operands class does, or how to use it, the curated class examples here may help.
The following shows 13 code examples of the Operands class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
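Before diving into the examples, here is a minimal sketch of how Operands<T> is typically indexed, inferred from the examples below. Operands<T> is a flat container that splits a frame's slots into an argument section followed by a local section; the accessor names (argument, local, size, operandForIndex, operator[]) are taken from the examples themselves, while the toy element type and values are illustrative only:

// Illustrative sketch: two arguments followed by three locals.
Operands<int> slots(2 /* arguments */, 3 /* locals */);
slots.argument(0) = 10;  // index within the argument section
slots.local(2) = 42;     // index within the local section
for (size_t i = slots.size(); i--;) {
    // operandForIndex() maps a flat index back to the bytecode operand,
    // the same value the examples pass to helpers like exec->r(operand).
    int operand = slots.operandForIndex(i);
    dataLog("operand ", operand, " = ", slots[i], "\n");
}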
Example 1: reconstruct
void JITCode::reconstruct(
    ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<JSValue>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);

    result = Operands<JSValue>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;) {
        int operand = result.operandForIndex(i);

        if (operandIsArgument(operand)
            && !VirtualRegister(operand).toArgument()
            && codeBlock->codeType() == FunctionCode
            && codeBlock->specializationKind() == CodeForConstruct) {
            // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
            // also never be used. It doesn't matter what we put into the value for this,
            // but it has to be an actual value that can be grokked by subsequent DFG passes,
            // so we sanitize it here by turning it into Undefined.
            result[i] = jsUndefined();
            continue;
        }

        ValueRecovery recovery = recoveries[i];
        JSValue value;
        switch (recovery.technique()) {
        case AlreadyInJSStack:
        case AlreadyInJSStackAsUnboxedCell:
        case AlreadyInJSStackAsUnboxedBoolean:
            value = exec->r(operand).jsValue();
            break;
        case AlreadyInJSStackAsUnboxedInt32:
            value = jsNumber(exec->r(operand).unboxedInt32());
            break;
        case AlreadyInJSStackAsUnboxedInt52:
            value = jsNumber(exec->r(operand).unboxedInt52());
            break;
        case AlreadyInJSStackAsUnboxedDouble:
            value = jsDoubleNumber(exec->r(operand).unboxedDouble());
            break;
        case Constant:
            value = recovery.constant();
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        result[i] = value;
    }
}
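Note the Operands<JSValue>(OperandsLike, recoveries) construction above: the OperandsLike tag copies only the argument/local shape of another Operands, not its elements. A minimal sketch of the idiom, assuming (as Example 1 suggests) that the new elements start out default-constructed:

// Sketch: build a result shaped like an existing Operands, then fill it.
Operands<ValueRecovery> recoveries(codeBlock->numParameters(), numVariables);
Operands<JSValue> result(OperandsLike, recoveries); // same arguments/locals split
ASSERT(result.size() == recoveries.size());
for (size_t i = result.size(); i--;)
    result[i] = jsUndefined(); // then overwrite element by element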
Example 2: clobber
void clobber(const Operands<VariableAccessData*>& live)
{
    for (size_t i = live.size(); i--;) {
        VariableAccessData* variable = live[i];
        if (!variable)
            continue;
        noticeClobber(variable);
    }
}
Example 3: prepareOSREntry
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();

    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }

    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
        return 0;
    }

    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);

    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");

    for (int argument = values.numberOfArguments(); argument--;) {
        RELEASE_ASSERT(
            exec->r(virtualRegisterForArgument(argument).offset()).jsValue() == values.argument(argument));
    }

    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);

    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());

    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));

    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(stackFrameSize).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }

    exec->setCodeBlock(entryCodeBlock);

    void* result = entryCode->addressForCall().executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address ", RawPointer(result), "\n");

    return result;
}
Example 4: checkOperand
void checkOperand(
    BlockIndex blockIndex, Operands<size_t>& getLocalPositions,
    Operands<size_t>& setLocalPositions, int operand)
{
    if (getLocalPositions.operand(operand) == notSet)
        return;
    if (setLocalPositions.operand(operand) == notSet)
        return;

    BasicBlock* block = m_graph.m_blocks[blockIndex].get();

    VALIDATE(
        (block->at(getLocalPositions.operand(operand)),
         block->at(setLocalPositions.operand(operand)),
         blockIndex),
        getLocalPositions.operand(operand) < setLocalPositions.operand(operand));
}
Example 5: prepareOSREntry
SUPPRESS_ASAN
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    ExecutableBase* executable = dfgCodeBlock->ownerExecutable();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();

    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }

    if (bytecodeIndex)
        jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true);

    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
        return 0;
    }

    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);

    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");

    for (int argument = values.numberOfArguments(); argument--;) {
        JSValue valueOnStack = exec->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue();
        JSValue reconstructedValue = values.argument(argument);
        if (valueOnStack == reconstructedValue || !argument)
            continue;
        dataLog("Mismatch between reconstructed values and the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n");
        dataLog("    Value on stack: ", valueOnStack, "\n");
        dataLog("    Reconstructed value: ", reconstructedValue, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);

    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());

    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));

    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }

    exec->setCodeBlock(entryCodeBlock);

    void* result = entryCode->addressForCall(
        vm, executable, ArityCheckNotRequired,
        RegisterPreservationNotRequired).executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address ", RawPointer(result), "\n");

    return result;
}
Example 6: run
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        block->ssa->availabilityAtHead.fill(Availability());
        block->ssa->availabilityAtTail.fill(Availability());
    }

    BasicBlock* root = m_graph.block(0);
    for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) {
        root->ssa->availabilityAtHead.argument(argument) =
            Availability::unavailable().withFlush(
                FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument)));
    }
    for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;)
        root->ssa->availabilityAtHead.local(local) = Availability::unavailable();

    if (m_graph.m_plan.mode == FTLForOSREntryMode) {
        for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;) {
            root->ssa->availabilityAtHead.local(local) =
                Availability::unavailable().withFlush(
                    FlushedAt(FlushedJSValue, virtualRegisterForLocal(local)));
        }
    }

    // This could be made more efficient by processing blocks in reverse postorder.
    Operands<Availability> availability;
    bool changed;
    do {
        changed = false;

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            availability = block->ssa->availabilityAtHead;

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                switch (node->op()) {
                case SetLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node->child1().node(), variable->flushedAt());
                    break;
                }

                case GetArgument: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node, variable->flushedAt());
                    break;
                }

                case MovHint:
                case MovHintAndCheck: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node->child1().node());
                    break;
                }

                case ZombieHint: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) = Availability::unavailable();
                    break;
                }

                default:
                    break;
                }
            }

            if (availability == block->ssa->availabilityAtTail)
                continue;

            block->ssa->availabilityAtTail = availability;
            changed = true;

            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successor = block->successor(successorIndex);
                for (unsigned i = availability.size(); i--;) {
                    successor->ssa->availabilityAtHead[i] = availability[i].merge(
                        successor->ssa->availabilityAtHead[i]);
                }
            }
        }
    } while (changed);

    return true;
}
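Example 6 is a classic forward dataflow fixpoint over Operands<Availability>: run each block's transfer function, then fold the block's tail state into every successor's head state until nothing changes. A stripped-down sketch of just the propagation step, assuming (as the example suggests) that Availability supports merge() and equality comparison:

// Sketch: element-wise merge of a predecessor's tail state into a
// successor's head state; returns whether anything changed.
static bool mergeInto(const Operands<Availability>& tail, Operands<Availability>& head)
{
    bool changed = false;
    for (size_t i = tail.size(); i--;) {
        Availability merged = tail[i].merge(head[i]);
        if (merged == head[i])
            continue; // this slot is already stable
        head[i] = merged;
        changed = true;
    }
    return changed; // caller re-runs the whole pass while any merge changed a head
}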
Example 7: compileOSRExit
void compileOSRExit(ExecState* exec)
{
    SamplingRegion samplingRegion("DFG OSR Exit Compilation");

    CodeBlock* codeBlock = exec->codeBlock();

    ASSERT(codeBlock);
    ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);

    JSGlobalData* globalData = &exec->globalData();

    uint32_t exitIndex = globalData->osrExitIndex;
    OSRExit& exit = codeBlock->osrExit(exitIndex);

    // Make sure all code on our inline stack is JIT compiled. This is necessary since
    // we may opt to inline a code block even before it had ever been compiled by the
    // JIT, but our OSR exit infrastructure currently only works if the target of the
    // OSR exit is JIT code. This could be changed since there is nothing particularly
    // hard about doing an OSR exit into the interpreter, but for now this seems to make
    // sense in that if we're OSR exiting from inlined code of a DFG code block, then
    // probably it's a good sign that the thing we're exiting into is hot. Even more
    // interestingly, since the code was inlined, it may never otherwise get JIT
    // compiled since the act of inlining it may ensure that it otherwise never runs.
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())
            ->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)
            ->jitCompile(exec);
    }

    // Compute the value recoveries.
    Operands<ValueRecovery> operands;
    codeBlock->variableEventStream().reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->minifiedDFG(), exit.m_streamIndex, operands);

    // There may be an override, for forward speculations.
    if (!!exit.m_valueRecoveryOverride) {
        operands.setOperand(
            exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
    }

    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex)
        recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog(
        "Generating OSR exit #", exitIndex, " (seq#", exit.m_streamIndex,
        ", bc#", exit.m_codeOrigin.bytecodeIndex, ", @", exit.m_nodeIndex, ", ",
        exit.m_kind, ") for ", *codeBlock, ".\n");
#endif

    {
        CCallHelpers jit(globalData, codeBlock);
        OSRExitCompiler exitCompiler(jit);

        jit.jitAssertHasValidCallFrame();

        if (globalData->m_perBytecodeProfiler && codeBlock->compilation()) {
            Profiler::Database& database = *globalData->m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->compilation();

            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind,
                exit.m_watchpointIndex != std::numeric_limits<unsigned>::max());
            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
        }

        exitCompiler.compileExit(exit, operands, recovery);

        LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
        exit.m_code = FINALIZE_CODE_IF(
            shouldShowDisassembly(),
            patchBuffer,
            ("DFG OSR exit #%u (bc#%u, @%u, %s) from %s",
                exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex,
                exitKindToString(exit.m_kind), toCString(*codeBlock).data()));
    }

    {
        RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }

    globalData->osrExitJumpDestination = exit.m_code.code().executableAddress();
}
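One detail worth noting in Example 7: the value recovery override is applied with setOperand(), which addresses a slot by its bytecode operand rather than by flat index (contrast result[i] in Example 1). A minimal sketch of that access pattern, adding the hasOperand() guard that other call sites in this collection use:

// Sketch: patch a single slot's recovery, keyed by bytecode operand.
int operand = exit.m_valueRecoveryOverride->operand; // a bytecode operand, not a flat index
if (operands.hasOperand(operand))
    operands.setOperand(operand, exit.m_valueRecoveryOverride->recovery);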
Example 8: compileExit
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
//......... (part of the code omitted here) .........
Example 9: compileOSRExit
void compileOSRExit(ExecState* exec)
{
    SamplingRegion samplingRegion("DFG OSR Exit Compilation");

    CodeBlock* codeBlock = exec->codeBlock();

    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);

    VM* vm = &exec->vm();

    // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm->heap);

    uint32_t exitIndex = vm->osrExitIndex;
    OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];

    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);

    // Compute the value recoveries.
    Operands<ValueRecovery> operands;
    codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);

    // There may be an override, for forward speculations.
    if (!!exit.m_valueRecoveryOverride) {
        operands.setOperand(
            exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
    }

    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex != UINT_MAX)
        recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];

    {
        CCallHelpers jit(vm, codeBlock);
        OSRExitCompiler exitCompiler(jit);

        jit.jitAssertHasValidCallFrame();

        if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
            Profiler::Database& database = *vm->m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();

            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind, exit.m_kind == UncountableInvalidation);
            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
        }

        exitCompiler.compileExit(exit, operands, recovery);

        LinkBuffer patchBuffer(*vm, jit, codeBlock);
        exit.m_code = FINALIZE_CODE_IF(
            shouldShowDisassembly() || Options::verboseOSR(),
            patchBuffer,
            ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
                exitIndex, toCString(exit.m_codeOrigin).data(),
                exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
                toCString(ignoringContext<DumpContext>(operands)).data()));
    }

    {
        RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }

    vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
}
Example 10: compileExit
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit for (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") ");
    dataLog(operands);
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            break;

        case BooleanSpeculationCheck:
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }

    // 3) Refine some array and/or value profile, if appropriate.
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
//......... (part of the code omitted here) .........
Example 11: ASSERT
void VariableEventStream::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
    unsigned index, Operands<ValueRecovery>& valueRecoveries) const
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();

    unsigned numVariables;
    if (codeOrigin.inlineCallFrame)
        numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + VirtualRegister(codeOrigin.inlineCallFrame->stackOffset).toLocal() + 1;
    else
        numVariables = baselineCodeBlock->m_numCalleeRegisters;

    // Crazy special case: if we're at index == 0 then this must be an argument check
    // failure, in which case all variables are already set up. The recoveries should
    // reflect this.
    if (!index) {
        valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
        for (size_t i = 0; i < valueRecoveries.size(); ++i) {
            valueRecoveries[i] = ValueRecovery::displacedInJSStack(
                VirtualRegister(valueRecoveries.operandForIndex(i)), DataFormatJS);
        }
        return;
    }

    // Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go.
    unsigned startIndex = index - 1;
    while (at(startIndex).kind() != Reset)
        startIndex--;

    // Step 2: Create a mock-up of the DFG's state and execute the events.
    Operands<ValueSource> operandSources(codeBlock->numParameters(), numVariables);
    for (unsigned i = operandSources.size(); i--;)
        operandSources[i] = ValueSource(SourceIsDead);
    HashMap<MinifiedID, MinifiedGenerationInfo> generationInfos;
    for (unsigned i = startIndex; i < index; ++i) {
        const VariableEvent& event = at(i);
        switch (event.kind()) {
        case Reset:
            // nothing to do.
            break;
        case BirthToFill:
        case BirthToSpill:
        case Birth: {
            MinifiedGenerationInfo info;
            info.update(event);
            generationInfos.add(event.id(), info);
            break;
        }
        case Fill:
        case Spill:
        case Death: {
            HashMap<MinifiedID, MinifiedGenerationInfo>::iterator iter = generationInfos.find(event.id());
            ASSERT(iter != generationInfos.end());
            iter->value.update(event);
            break;
        }
        case MovHintEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource(event.id()));
            break;
        case SetLocalEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource::forDataFormat(event.machineRegister(), event.dataFormat()));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Step 3: Compute value recoveries!
    valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
    for (unsigned i = 0; i < operandSources.size(); ++i) {
        ValueSource& source = operandSources[i];
        if (source.isTriviallyRecoverable()) {
            valueRecoveries[i] = source.valueRecovery();
            continue;
        }

        ASSERT(source.kind() == HaveNode);
        MinifiedNode* node = graph.at(source.id());
        MinifiedGenerationInfo info = generationInfos.get(source.id());
        if (!info.alive) {
            valueRecoveries[i] = ValueRecovery::constant(jsUndefined());
            continue;
        }

        if (tryToSetConstantRecovery(valueRecoveries[i], node))
            continue;

        ASSERT(info.format != DataFormatNone);

        if (info.filled) {
            if (info.format == DataFormatDouble) {
                valueRecoveries[i] = ValueRecovery::inFPR(info.u.fpr, DataFormatDouble);
                continue;
            }
#if USE(JSVALUE32_64)
            if (info.format & DataFormatJS) {
//......... (part of the code omitted here) .........
Example 12: ASSERT
void VariableEventStream::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
    unsigned index, Operands<ValueRecovery>& valueRecoveries) const
{
    ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();

    unsigned numVariables;
    if (codeOrigin.inlineCallFrame)
        numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + codeOrigin.inlineCallFrame->stackOffset;
    else
        numVariables = baselineCodeBlock->m_numCalleeRegisters;

    // Crazy special case: if we're at index == 0 then this must be an argument check
    // failure, in which case all variables are already set up. The recoveries should
    // reflect this.
    if (!index) {
        valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
        for (size_t i = 0; i < valueRecoveries.size(); ++i)
            valueRecoveries[i] = ValueRecovery::alreadyInJSStack();
        return;
    }

    // Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go.
    unsigned startIndex = index - 1;
    while (at(startIndex).kind() != Reset)
        startIndex--;

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Computing OSR exit recoveries starting at seq#%u.\n", startIndex);
#endif

    // Step 2: Create a mock-up of the DFG's state and execute the events.
    Operands<ValueSource> operandSources(codeBlock->numParameters(), numVariables);
    Vector<MinifiedGenerationInfo, 32> generationInfos(graph.originalGraphSize());
    for (unsigned i = startIndex; i < index; ++i) {
        const VariableEvent& event = at(i);
        switch (event.kind()) {
        case Reset:
            // nothing to do.
            break;
        case BirthToFill:
        case BirthToSpill:
        case Fill:
        case Spill:
        case Death:
            generationInfos[event.nodeIndex()].update(event);
            break;
        case MovHint:
            if (operandSources.hasOperand(event.operand()))
                operandSources.setOperand(event.operand(), ValueSource(event.nodeIndex()));
            break;
        case SetLocalEvent:
            if (operandSources.hasOperand(event.operand()))
                operandSources.setOperand(event.operand(), ValueSource::forDataFormat(event.dataFormat()));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Step 3: Record the things that are live, so we can get to them more quickly.
    Vector<unsigned, 16> indicesOfLiveThings;
    for (unsigned i = 0; i < generationInfos.size(); ++i) {
        if (generationInfos[i].format != DataFormatNone)
            indicesOfLiveThings.append(i);
    }

    // Step 4: Compute value recoveries!
    valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
    for (unsigned i = 0; i < operandSources.size(); ++i) {
        ValueSource& source = operandSources[i];
        if (source.isTriviallyRecoverable()) {
            valueRecoveries[i] = source.valueRecovery();
            continue;
        }

        ASSERT(source.kind() == HaveNode);
        MinifiedNode* node = graph.at(source.nodeIndex());
        if (node) {
            if (node->hasConstantNumber()) {
                valueRecoveries[i] = ValueRecovery::constant(
                    codeBlock->constantRegister(
                        FirstConstantRegisterIndex + node->constantNumber()).get());
                continue;
            }
            if (node->hasWeakConstant()) {
                valueRecoveries[i] = ValueRecovery::constant(node->weakConstant());
                continue;
            }
            if (node->op() == PhantomArguments) {
                valueRecoveries[i] = ValueRecovery::argumentsThatWereNotCreated();
                continue;
            }
        }

        MinifiedGenerationInfo* info = &generationInfos[source.nodeIndex()];
        if (info->format == DataFormatNone) {
            // Try to see if there is an alternate node that would contain the value we want.
//......... (part of the code omitted here) .........
Example 13: emitRestoreArguments
void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
{
    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;

        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
            m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
            m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
            continue;
        }

        InlineCallFrame* inlineCallFrame =
            m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;

        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
                GPRInfo::regT0);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
                GPRInfo::regT0);
        }

        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
            m_jit.load32(
                AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
                GPRInfo::regT1);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
                GPRInfo::regT1);
        }

        m_jit.setupArgumentsWithExecState(
            AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);

        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        case ClonedArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        m_jit.call(GPRInfo::nonArgGPR0);
        m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));

        alreadyAllocatedArguments.add(id, operand);
    }
}