This article collects typical usage examples of the C++ Operands::size method. If you have been wondering what Operands::size does, how to use it, or what real calls look like, the curated code examples below may help. You can also explore the other methods of the Operands class that this method belongs to.
Seven code examples of the Operands::size method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: clobber
void clobber(const Operands<VariableAccessData*>& live)
{
    for (size_t i = live.size(); i--;) {
        VariableAccessData* variable = live[i];
        if (!variable)
            continue;
        noticeClobber(variable);
    }
}
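A note on the loop shape: "for (size_t i = live.size(); i--;)" counts down from the last index to 0 without underflowing the unsigned counter, and the same idiom recurs throughout these examples. A minimal, self-contained sketch of that idiom, with std::vector and a printout standing in for the real Operands and VariableAccessData types:

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<int*> live = { new int(1), nullptr, new int(3) };
    // The condition "i--" tests i and then decrements it, so the body sees
    // size()-1 down to 0 and the loop exits before i would wrap around.
    for (std::size_t i = live.size(); i--;) {
        int* variable = live[i];
        if (!variable)
            continue; // skip empty slots, as clobber() does
        std::printf("index %zu -> %d\n", i, *variable);
    }
    for (int* pointer : live)
        delete pointer;
    return 0;
}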
Example 2: reconstruct
void JITCode::reconstruct(
    ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<JSValue>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);

    result = Operands<JSValue>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;) {
        int operand = result.operandForIndex(i);

        if (operandIsArgument(operand)
            && !VirtualRegister(operand).toArgument()
            && codeBlock->codeType() == FunctionCode
            && codeBlock->specializationKind() == CodeForConstruct) {
            // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
            // also never be used. It doesn't matter what we put into the value for this,
            // but it has to be an actual value that can be grokked by subsequent DFG passes,
            // so we sanitize it here by turning it into Undefined.
            result[i] = jsUndefined();
            continue;
        }

        ValueRecovery recovery = recoveries[i];
        JSValue value;
        switch (recovery.technique()) {
        case AlreadyInJSStack:
        case AlreadyInJSStackAsUnboxedCell:
        case AlreadyInJSStackAsUnboxedBoolean:
            value = exec->r(operand).jsValue();
            break;
        case AlreadyInJSStackAsUnboxedInt32:
            value = jsNumber(exec->r(operand).unboxedInt32());
            break;
        case AlreadyInJSStackAsUnboxedInt52:
            value = jsNumber(exec->r(operand).unboxedInt52());
            break;
        case AlreadyInJSStackAsUnboxedDouble:
            value = jsDoubleNumber(exec->r(operand).unboxedDouble());
            break;
        case Constant:
            value = recovery.constant();
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        result[i] = value;
    }
}
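The heart of this example is the switch over recovery.technique(), which reboxes each operand according to how it was stored. A simplified, self-contained sketch of that dispatch, assuming hypothetical Technique and Recovery types in place of the real ValueRecovery and JSValue machinery:

#include <cassert>
#include <cstdio>

// Hypothetical stand-ins for ValueRecovery's technique tag and payload.
enum class Technique { AlreadyInStack, UnboxedInt32, Constant };

struct Recovery {
    Technique technique;
    int int32;
    double constant;
};

// Rebox one operand; which bits we use depends only on the technique.
double recover(const Recovery& recovery, double stackSlot)
{
    switch (recovery.technique) {
    case Technique::AlreadyInStack:
        return stackSlot; // already usable as-is
    case Technique::UnboxedInt32:
        return double(recovery.int32); // rebox the raw int32
    case Technique::Constant:
        return recovery.constant; // no stack read needed
    }
    assert(false); // mirrors RELEASE_ASSERT_NOT_REACHED()
    return 0;
}

int main()
{
    Recovery recovery = { Technique::UnboxedInt32, 42, 0.0 };
    std::printf("%.1f\n", recover(recovery, 0.0)); // prints 42.0
    return 0;
}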
Example 3: run
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        block->ssa->availabilityAtHead.fill(Availability());
        block->ssa->availabilityAtTail.fill(Availability());
    }

    BasicBlock* root = m_graph.block(0);
    for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) {
        root->ssa->availabilityAtHead.argument(argument) =
            Availability::unavailable().withFlush(
                FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument)));
    }
    for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;)
        root->ssa->availabilityAtHead.local(local) = Availability::unavailable();

    if (m_graph.m_plan.mode == FTLForOSREntryMode) {
        for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;) {
            root->ssa->availabilityAtHead.local(local) =
                Availability::unavailable().withFlush(
                    FlushedAt(FlushedJSValue, virtualRegisterForLocal(local)));
        }
    }

    // This could be made more efficient by processing blocks in reverse postorder.
    Operands<Availability> availability;
    bool changed;
    do {
        changed = false;

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            availability = block->ssa->availabilityAtHead;

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                switch (node->op()) {
                case SetLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node->child1().node(), variable->flushedAt());
                    break;
                }

                case GetArgument: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node, variable->flushedAt());
                    break;
                }

                case MovHint:
                case MovHintAndCheck: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node->child1().node());
                    break;
                }

                case ZombieHint: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) = Availability::unavailable();
                    break;
                }

                default:
                    break;
                }
            }

            if (availability == block->ssa->availabilityAtTail)
                continue;

            block->ssa->availabilityAtTail = availability;
            changed = true;

            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successor = block->successor(successorIndex);
                for (unsigned i = availability.size(); i--;) {
                    successor->ssa->availabilityAtHead[i] = availability[i].merge(
                        successor->ssa->availabilityAtHead[i]);
                }
            }
        }
    } while (changed);

    return true;
}
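run() is a textbook forward dataflow fixed point: copy availabilityAtHead, transfer through each node, and if availabilityAtTail changed, merge into every successor and iterate again. A self-contained sketch of the same iterate-until-stable shape over a toy three-block CFG; Block, transfer, and merge here are hypothetical stand-ins, with bitmasks as the lattice so the iteration terminates:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Block {
    std::vector<int> successors;
    int atHead = 0; // facts known on entry (a bitmask, so the lattice is finite)
    int atTail = 0; // facts known on exit
};

// Toy transfer function: each block contributes its own bit.
int transfer(int state, int blockIndex) { return state | (1 << blockIndex); }

// Toy merge, in the role of Availability::merge: union the incoming facts.
int merge(int a, int b) { return a | b; }

int main()
{
    std::vector<Block> blocks(3);
    blocks[0].successors = { 1 };
    blocks[1].successors = { 2 };
    blocks[2].successors = { 1 }; // back edge: forces a second pass

    bool changed;
    do {
        changed = false;
        for (std::size_t i = 0; i < blocks.size(); ++i) {
            int state = transfer(blocks[i].atHead, int(i));
            if (state == blocks[i].atTail)
                continue; // nothing new, so don't disturb the successors
            blocks[i].atTail = state;
            changed = true;
            for (int successor : blocks[i].successors)
                blocks[successor].atHead = merge(blocks[successor].atHead, state);
        }
    } while (changed);

    for (std::size_t i = 0; i < blocks.size(); ++i)
        std::printf("block %zu: head=0x%x tail=0x%x\n", i, blocks[i].atHead, blocks[i].atTail);
    return 0;
}

Processing blocks in reverse postorder, as the comment in run() suggests, would reach this same fixed point in fewer passes; the result is identical either way.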
Example 4: compileExit
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;
        case BooleanSpeculationCheck:
            break;
        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
//......... part of the code omitted .........
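The move/lshift32/or32 sequence near the end of the shown code emits machine code for arrayModes |= 1 << indexingType. A plain C++ sketch of that profile update, assuming a hypothetical ToyArrayProfile in place of the real ArrayProfile:

#include <cstdint>
#include <cstdio>

struct ToyArrayProfile {
    std::uint32_t arrayModes = 0; // one bit per indexing type ever observed
};

void recordIndexingType(ToyArrayProfile& profile, std::uint8_t indexingType)
{
    // The JIT-emitted equivalent: move 1 into a scratch register, shift it
    // left by the indexing type, then OR it into the profile's bitmask.
    profile.arrayModes |= 1u << indexingType;
}

int main()
{
    ToyArrayProfile profile;
    recordIndexingType(profile, 3);
    recordIndexingType(profile, 5);
    recordIndexingType(profile, 3); // idempotent: the bit is already set
    std::printf("arrayModes = 0x%x\n", profile.arrayModes); // prints 0x28
    return 0;
}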
Example 5: compileExit
//......... part of the code omitted .........
    // originally been in a register or spilled. This allows us to decouple "where was
    // the variable" from "how was it represented". Consider the
    // Int32DisplacedInJSStack recovery: it tells us that the value is in a
    // particular place and that that place holds an unboxed int32. We have two different
    // places that a value could be (displaced, register) and a bunch of different
    // ways of representing a value. The number of recoveries is two * a bunch. The code
    // below means that we have to have two + a bunch cases rather than two * a bunch.
    // Once we have loaded the value from wherever it was, the reboxing is the same
    // regardless of its location. Likewise, before we do the reboxing, the way we get to
    // the value (i.e. where we load it from) is the same regardless of its type. Because
    // the code below always dumps everything into a scratch buffer first, the two
    // questions become orthogonal, which simplifies adding new types and adding new
    // locations.
    //
    // This raises the question: does using such a suboptimal implementation of OSR exit,
    // where we always emit code to dump all state into a scratch buffer only to then
    // dump it right back into the stack, hurt us in any way? The answer is that OSR exits
    // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
    // taken more than ~100 times, we jettison the DFG code block along with all of its
    // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
    // execute frequently enough for the codegen to matter that much. It probably matters
    // enough that we don't want to turn this into some super-slow function call, but so
    // long as we're generating straight-line code, that code can be pretty bad. Also
    // because we tend to exit only along one OSR exit from any DFG code block - that's an
    // empirical result that we're extremely confident about - the code size of this
    // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
    // harmful to the system: it probably won't reduce either net memory usage or net
    // execution time. It will only prevent us from cleanly decoupling "where was the
    // variable" from "how was it represented", which will make it more difficult to add
    // features in the future and it will make it harder to reason about bugs.

    // 4) Save all state from GPRs into the scratch buffer.
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
        case UnboxedInt52InGPR:
        case UnboxedStrictInt52InGPR:
        case UnboxedCellInGPR:
            m_jit.store64(recovery.gpr(), scratch + index);
            break;

        default:
            break;
        }
    }

    // And voila, all GPRs are free to reuse.

    // 5) Save all state from FPRs into the scratch buffer.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), GPRInfo::regT0);
            break;
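The long comment above argues for "two + a bunch" cases instead of "two * a bunch": first dump every value into the scratch buffer no matter how it is represented, then rebox every value no matter where it lived. A toy sketch of that two-phase structure, assuming hypothetical Location and Repr enums rather than the real recovery techniques:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins: the real code has many locations and representations.
enum class Location { InRegister, OnStack };
enum class Repr { Boxed, RawInt32, RawDouble };

struct Operand {
    Location location;
    Repr repr;
    double bits; // stands in for whatever the register or stack slot holds
};

int main()
{
    std::vector<Operand> operands = {
        { Location::InRegister, Repr::RawInt32, 7 },
        { Location::OnStack, Repr::RawDouble, 2.5 },
        { Location::InRegister, Repr::Boxed, 1 },
    };

    // Phase 1: one case per *location* -- everything funnels into the scratch buffer.
    std::vector<double> scratch(operands.size());
    for (std::size_t i = 0; i < operands.size(); ++i) {
        switch (operands[i].location) {
        case Location::InRegister:
            scratch[i] = operands[i].bits; // a real exit would store the register
            break;
        case Location::OnStack:
            scratch[i] = operands[i].bits; // ... or copy the displaced stack slot
            break;
        }
    }

    // Phase 2: one case per *representation*, reading only from the scratch buffer.
    for (std::size_t i = 0; i < operands.size(); ++i) {
        switch (operands[i].repr) {
        case Repr::Boxed:
            std::printf("slot %zu already boxed\n", i);
            break;
        case Repr::RawInt32:
        case Repr::RawDouble:
            // A real engine would tag or box here; the sketch just reports it.
            std::printf("rebox slot %zu from raw %.1f\n", i, scratch[i]);
            break;
        }
    }
    return 0; // 2 locations + 3 representations = 5 cases, not 2 * 3 = 6
}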
Example 6: reconstruct
void VariableEventStream::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
    unsigned index, Operands<ValueRecovery>& valueRecoveries) const
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();

    unsigned numVariables;
    if (codeOrigin.inlineCallFrame)
        numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + VirtualRegister(codeOrigin.inlineCallFrame->stackOffset).toLocal() + 1;
    else
        numVariables = baselineCodeBlock->m_numCalleeRegisters;

    // Crazy special case: if we're at index == 0 then this must be an argument check
    // failure, in which case all variables are already set up. The recoveries should
    // reflect this.
    if (!index) {
        valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
        for (size_t i = 0; i < valueRecoveries.size(); ++i) {
            valueRecoveries[i] = ValueRecovery::displacedInJSStack(
                VirtualRegister(valueRecoveries.operandForIndex(i)), DataFormatJS);
        }
        return;
    }

    // Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go.
    unsigned startIndex = index - 1;
    while (at(startIndex).kind() != Reset)
        startIndex--;

    // Step 2: Create a mock-up of the DFG's state and execute the events.
    Operands<ValueSource> operandSources(codeBlock->numParameters(), numVariables);
    for (unsigned i = operandSources.size(); i--;)
        operandSources[i] = ValueSource(SourceIsDead);
    HashMap<MinifiedID, MinifiedGenerationInfo> generationInfos;
    for (unsigned i = startIndex; i < index; ++i) {
        const VariableEvent& event = at(i);
        switch (event.kind()) {
        case Reset:
            // nothing to do.
            break;
        case BirthToFill:
        case BirthToSpill:
        case Birth: {
            MinifiedGenerationInfo info;
            info.update(event);
            generationInfos.add(event.id(), info);
            break;
        }
        case Fill:
        case Spill:
        case Death: {
            HashMap<MinifiedID, MinifiedGenerationInfo>::iterator iter = generationInfos.find(event.id());
            ASSERT(iter != generationInfos.end());
            iter->value.update(event);
            break;
        }
        case MovHintEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource(event.id()));
            break;
        case SetLocalEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource::forDataFormat(event.machineRegister(), event.dataFormat()));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Step 3: Compute value recoveries!
    valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
    for (unsigned i = 0; i < operandSources.size(); ++i) {
        ValueSource& source = operandSources[i];
        if (source.isTriviallyRecoverable()) {
            valueRecoveries[i] = source.valueRecovery();
            continue;
        }

        ASSERT(source.kind() == HaveNode);
        MinifiedNode* node = graph.at(source.id());
        MinifiedGenerationInfo info = generationInfos.get(source.id());
        if (!info.alive) {
            valueRecoveries[i] = ValueRecovery::constant(jsUndefined());
            continue;
        }

        if (tryToSetConstantRecovery(valueRecoveries[i], node))
            continue;

        ASSERT(info.format != DataFormatNone);

        if (info.filled) {
            if (info.format == DataFormatDouble) {
                valueRecoveries[i] = ValueRecovery::inFPR(info.u.fpr, DataFormatDouble);
                continue;
            }
#if USE(JSVALUE32_64)
            if (info.format & DataFormatJS) {
//......... part of the code omitted .........
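Steps 1 and 2 are a checkpoint-and-replay pattern: scan backwards to the nearest Reset, then run events forward to rebuild the state at the requested index. A stripped-down sketch of the same replay loop, assuming a hypothetical Event stream in place of VariableEventStream:

#include <cstddef>
#include <cstdio>
#include <unordered_map>
#include <vector>

enum class Kind { Reset, Birth, Fill, Death };

struct Event {
    Kind kind;
    int id;    // which value the event is about
    int value; // payload for Birth/Fill
};

int main()
{
    std::vector<Event> stream = {
        { Kind::Birth, 1, 10 },
        { Kind::Reset, 0, 0 }, // checkpoint: replay restarts here
        { Kind::Birth, 2, 20 },
        { Kind::Fill, 2, 21 },
        { Kind::Birth, 3, 30 },
        { Kind::Death, 3, 0 },
    };
    std::size_t index = stream.size(); // reconstruct the state at the end

    // Step 1: scan backwards to the most recent checkpoint.
    std::size_t startIndex = index - 1;
    while (stream[startIndex].kind != Kind::Reset)
        startIndex--;

    // Step 2: replay the events forward to mock up the state at 'index'.
    std::unordered_map<int, int> liveValues;
    for (std::size_t i = startIndex; i < index; ++i) {
        const Event& event = stream[i];
        switch (event.kind) {
        case Kind::Reset:
            break; // nothing to do
        case Kind::Birth:
        case Kind::Fill:
            liveValues[event.id] = event.value;
            break;
        case Kind::Death:
            liveValues.erase(event.id);
            break;
        }
    }

    for (const auto& entry : liveValues)
        std::printf("id %d -> %d\n", entry.first, entry.second); // prints: id 2 -> 21
    return 0;
}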
Example 7: emitRestoreArguments
void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
{
    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;

        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
            m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
            m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
            continue;
        }

        InlineCallFrame* inlineCallFrame =
            m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;

        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
                GPRInfo::regT0);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
                GPRInfo::regT0);
        }

        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
            m_jit.load32(
                AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
                GPRInfo::regT1);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
                GPRInfo::regT1);
        }

        m_jit.setupArgumentsWithExecState(
            AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);

        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        case ClonedArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        m_jit.call(GPRInfo::nonArgGPR0);
        m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));

        alreadyAllocatedArguments.add(id, operand);
    }
}
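The alreadyAllocatedArguments map is doing memoization: the first operand that needs a given phantom-arguments node pays for the runtime call, and every later operand just copies the stored result. The same look-up-before-recompute shape in a self-contained sketch, with materialize() as a hypothetical stand-in for the operation call:

#include <cstdio>
#include <unordered_map>
#include <vector>

// Pretend this is the expensive call into the runtime.
int materialize(int id)
{
    std::printf("materializing arguments for node %d\n", id);
    return id * 100;
}

int main()
{
    // Several operands can share one phantom-arguments node.
    std::vector<int> nodeForOperand = { 7, 7, 9, 7 };
    std::unordered_map<int, int> alreadyAllocated; // node ID -> stored result

    for (int id : nodeForOperand) {
        auto iter = alreadyAllocated.find(id);
        if (iter != alreadyAllocated.end()) {
            std::printf("reusing node %d -> %d\n", id, iter->second);
            continue; // cheap path: copy what the first operand produced
        }
        alreadyAllocated.emplace(id, materialize(id)); // slow path, once per node
    }
    return 0;
}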