This article collects typical usage examples of the C++ method FrameEntry::isTypeKnown. If you have been wondering what FrameEntry::isTypeKnown does, how to call it, or what real uses of it look like, the hand-picked code samples below may help. You can also explore further usage examples of the FrameEntry class that this method belongs to.
Six code examples of FrameEntry::isTypeKnown are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
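Before the full listings, note the pattern that recurs across all six examples: every getKnownType() call is guarded by isTypeKnown(), falling back to JSVAL_TYPE_UNKNOWN when the entry's type has not been statically determined. The sketch below distills that pattern; the helper name GetEntryTypeOrUnknown is hypothetical and assumes the SpiderMonkey method-JIT headers (FrameEntry, JSValueType) are available.

/*
 * Hypothetical helper (not part of the SpiderMonkey sources) showing the
 * guard used throughout the examples below: only call getKnownType() once
 * isTypeKnown() confirms the entry's type is statically known.
 */
static JSValueType
GetEntryTypeOrUnknown(FrameEntry *fe)
{
    return fe->isTypeKnown() ? fe->getKnownType() : JSVAL_TYPE_UNKNOWN;
}

Examples 1 and 4 use the same guard for a slightly different purpose: deciding whether a copy may inherit its backing entry's concrete type via setType(), or must invalidate its type instead.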
Example 1: entryFor
void
FrameState::pushCopyOf(uint32 index)
{
    FrameEntry *backing = entryFor(index);
    FrameEntry *fe = rawPush();
    fe->resetUnsynced();
    if (backing->isConstant()) {
        fe->setConstant(Jsvalify(backing->getValue()));
    } else {
        if (backing->isTypeKnown())
            fe->setType(backing->getKnownType());
        else
            fe->type.invalidate();
        fe->isNumber = backing->isNumber;
        fe->data.invalidate();
        if (backing->isCopy()) {
            backing = backing->copyOf();
            fe->setCopyOf(backing);
        } else {
            fe->setCopyOf(backing);
            backing->setCopied();
        }

        /* Maintain tracker ordering guarantees for copies. */
        JS_ASSERT(backing->isCopied());
        if (fe->trackerIndex() < backing->trackerIndex())
            swapInTracker(fe, backing);
    }
}
Example 2: compileArrayWithLength
CompileStatus
mjit::Compiler::inlineNativeFunction(uint32_t argc, bool callingNew)
{
    if (!cx->typeInferenceEnabled())
        return Compile_InlineAbort;

    if (applyTricks == LazyArgsObj)
        return Compile_InlineAbort;

    FrameEntry *origCallee = frame.peek(-((int)argc + 2));
    FrameEntry *thisValue = frame.peek(-((int)argc + 1));
    types::TypeSet *thisTypes = analysis->poppedTypes(PC, argc);

    if (!origCallee->isConstant() || !origCallee->isType(JSVAL_TYPE_OBJECT))
        return Compile_InlineAbort;

    JSObject *callee = &origCallee->getValue().toObject();
    if (!callee->isFunction())
        return Compile_InlineAbort;

    /*
     * The callee must have the same parent as the script's global, otherwise
     * inference may not have accounted for any side effects correctly.
     */
    if (!globalObj || globalObj != &callee->global())
        return Compile_InlineAbort;

    Native native = callee->toFunction()->maybeNative();
    if (!native)
        return Compile_InlineAbort;

    JSValueType type = knownPushedType(0);
    JSValueType thisType = thisValue->isTypeKnown()
                           ? thisValue->getKnownType()
                           : JSVAL_TYPE_UNKNOWN;

    /*
     * Note: when adding new natives which operate on properties, add relevant
     * constraint generation to the behavior of TypeConstraintCall.
     */

    /* Handle natives that can be called either with or without 'new'. */
    if (native == js_Array && type == JSVAL_TYPE_OBJECT && globalObj) {
        if (argc == 0 || argc == 1)
            return compileArrayWithLength(argc);
        return compileArrayWithArgs(argc);
    }

    /* Remaining natives must not be called with 'new'. */
    if (callingNew)
        return Compile_InlineAbort;

    if (native == js::num_parseInt && argc >= 1) {
        FrameEntry *arg = frame.peek(-(int32_t)argc);
        JSValueType argType = arg->isTypeKnown() ? arg->getKnownType() : JSVAL_TYPE_UNKNOWN;

        if ((argType == JSVAL_TYPE_DOUBLE || argType == JSVAL_TYPE_INT32) &&
            type == JSVAL_TYPE_INT32) {
            return compileParseInt(argType, argc);
        }
    }

    if (argc == 0) {
        if ((native == js::array_pop || native == js::array_shift) && thisType == JSVAL_TYPE_OBJECT) {
            /*
             * Only handle pop/shift on dense arrays which have never been used
             * in an iterator --- when popping elements we don't account for
             * suppressing deleted properties in active iterators.
             *
             * Constraints propagating properties directly into the result
             * type set are generated by TypeConstraintCall during inference.
             */
            if (!thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY |
                                               types::OBJECT_FLAG_ITERATED) &&
                !types::ArrayPrototypeHasIndexedProperty(cx, outerScript)) {
                bool packed = !thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_PACKED_ARRAY);
                return compileArrayPopShift(thisValue, packed, native == js::array_pop);
            }
        }
    } else if (argc == 1) {
        FrameEntry *arg = frame.peek(-1);
        types::TypeSet *argTypes = frame.extra(arg).types;
        if (!argTypes)
            return Compile_InlineAbort;
        JSValueType argType = arg->isTypeKnown() ? arg->getKnownType() : JSVAL_TYPE_UNKNOWN;

        if (native == js_math_abs) {
            if (argType == JSVAL_TYPE_INT32 && type == JSVAL_TYPE_INT32)
                return compileMathAbsInt(arg);
            if (argType == JSVAL_TYPE_DOUBLE && type == JSVAL_TYPE_DOUBLE)
                return compileMathAbsDouble(arg);
        }
        if (native == js_math_floor && argType == JSVAL_TYPE_DOUBLE &&
            type == JSVAL_TYPE_INT32) {
            return compileRound(arg, Floor);
        }
        if (native == js_math_round && argType == JSVAL_TYPE_DOUBLE &&
//......... some code omitted here .........
Example 3: Uses
CompileStatus
mjit::Compiler::compileParseInt(JSValueType argType, uint32_t argc)
{
    bool needStubCall = false;

    if (argc > 1) {
        FrameEntry *arg = frame.peek(-(int32_t)argc + 1);

        if (!arg->isTypeKnown() || arg->getKnownType() != JSVAL_TYPE_INT32)
            return Compile_InlineAbort;

        if (arg->isConstant()) {
            int32_t base = arg->getValue().toInt32();
            if (base != 0 && base != 10)
                return Compile_InlineAbort;
        } else {
            RegisterID baseReg = frame.tempRegForData(arg);
            needStubCall = true;

            Jump isTen = masm.branch32(Assembler::Equal, baseReg, Imm32(10));
            Jump isNotZero = masm.branch32(Assembler::NotEqual, baseReg, Imm32(0));
            stubcc.linkExit(isNotZero, Uses(2 + argc));

            isTen.linkTo(masm.label(), &masm);
        }
    }

    if (argType == JSVAL_TYPE_INT32) {
        if (needStubCall) {
            stubcc.leave();
            stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
            OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);
        }

        /*
         * Stack looks like callee, this, arg1, arg2, argN.
         * First pop all args other than arg1.
         */
        frame.popn(argc - 1);

        /* "Shimmy" arg1 to the callee slot and pop this + arg1. */
        frame.shimmy(2);

        if (needStubCall) {
            stubcc.rejoin(Changes(1));
        }
    } else {
        FrameEntry *arg = frame.peek(-(int32_t)argc);
        FPRegisterID fpScratchReg = frame.allocFPReg();
        FPRegisterID fpReg;
        bool allocate;

        DebugOnly<MaybeJump> notNumber = loadDouble(arg, &fpReg, &allocate);
        JS_ASSERT(!((MaybeJump)notNumber).isSet());

        masm.slowLoadConstantDouble(1, fpScratchReg);

        /* Slow path for NaN and numbers < 1. */
        Jump lessThanOneOrNan = masm.branchDouble(Assembler::DoubleLessThanOrUnordered,
                                                  fpReg, fpScratchReg);
        stubcc.linkExit(lessThanOneOrNan, Uses(2 + argc));

        frame.freeReg(fpScratchReg);

        /* Truncate to integer, slow path if this overflows. */
        RegisterID reg = frame.allocReg();
        Jump overflow = masm.branchTruncateDoubleToInt32(fpReg, reg);
        stubcc.linkExit(overflow, Uses(2 + argc));

        if (allocate)
            frame.freeReg(fpReg);

        stubcc.leave();
        stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
        OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);

        frame.popn(2 + argc);
        frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);

        stubcc.rejoin(Changes(1));
    }

    return Compile_Okay;
}
Example 4: local
void
FrameState::storeLocal(uint32 n, bool popGuaranteed, bool typeChange)
{
    FrameEntry *localFe = getLocal(n);
    bool cacheable = !eval && !escaping[n];

    if (!popGuaranteed && !cacheable) {
        JS_ASSERT_IF(base[localIndex(n)] && (!eval || n < script->nfixed),
                     entries[localIndex(n)].type.inMemory() &&
                     entries[localIndex(n)].data.inMemory());
        Address local(JSFrameReg, sizeof(JSStackFrame) + n * sizeof(Value));
        storeTo(peek(-1), local, false);
        forgetAllRegs(getLocal(n));
        localFe->resetSynced();
        return;
    }

    bool wasSynced = localFe->type.synced();

    /* Detect something like (x = x) which is a no-op. */
    FrameEntry *top = peek(-1);
    if (top->isCopy() && top->copyOf() == localFe) {
        JS_ASSERT(localFe->isCopied());
        return;
    }

    /* Completely invalidate the local variable. */
    if (localFe->isCopied()) {
        uncopy(localFe);
        if (!localFe->isCopied())
            forgetAllRegs(localFe);
    } else {
        forgetAllRegs(localFe);
    }

    localFe->resetUnsynced();

    /* Constants are easy to propagate. */
    if (top->isConstant()) {
        localFe->setCopyOf(NULL);
        localFe->setNotCopied();
        localFe->setConstant(Jsvalify(top->getValue()));
        return;
    }

    /*
     * When dealing with copies, there are two important invariants:
     *
     * 1) The backing store precedes all copies in the tracker.
     * 2) The backing store of a local is never a stack slot, UNLESS the local
     *    variable itself is a stack slot (blocks) that precedes the stack
     *    slot.
     *
     * If the top is a copy, and the second condition holds true, the local
     * can be rewritten as a copy of the original backing slot. If the first
     * condition does not hold, force it to hold by swapping in-place.
     */
    FrameEntry *backing = top;
    if (top->isCopy()) {
        backing = top->copyOf();
        JS_ASSERT(backing->trackerIndex() < top->trackerIndex());

        uint32 backingIndex = indexOfFe(backing);
        uint32 tol = uint32(spBase - base);
        if (backingIndex < tol || backingIndex < localIndex(n)) {
            /* local.idx < backing.idx means local cannot be a copy yet */
            if (localFe->trackerIndex() < backing->trackerIndex())
                swapInTracker(backing, localFe);
            localFe->setNotCopied();
            localFe->setCopyOf(backing);
            if (backing->isTypeKnown())
                localFe->setType(backing->getKnownType());
            else
                localFe->type.invalidate();
            localFe->data.invalidate();
            localFe->isNumber = backing->isNumber;
            return;
        }

        /*
         * If control flow lands here, then there was a bytecode sequence like
         *
         *  ENTERBLOCK 2
         *  GETLOCAL 1
         *  SETLOCAL 0
         *
         * The problem is slot N can't be backed by M if M could be popped
         * before N. We want a guarantee that when we pop M, even if it was
         * copied, it has no outstanding copies.
         *
         * Because of |let| expressions, it's kind of hard to really know
         * whether a region on the stack will be popped all at once. Bleh!
         *
         * This should be rare except in browser code (and maybe even then),
         * but even so there's a quick workaround. We take all copies of the
         * backing fe, and redirect them to be copies of the destination.
         */
        FrameEntry *tos = tosFe();
        for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
            FrameEntry *fe = tracker[i];
//......... some code omitted here .........
Example 5: tosFe
FrameEntry *
FrameState::uncopy(FrameEntry *original)
{
    JS_ASSERT(original->isCopied());

    /*
     * Copies have two critical invariants:
     *  1) The backing store precedes all copies in the tracker.
     *  2) The backing store of a copy cannot be popped from the stack
     *     while the copy is still live.
     *
     * Maintaining this invariant iteratively is kind of hard, so we choose
     * the "lowest" copy in the frame up-front.
     *
     * For example, if the stack is:
     *    [A, B, C, D]
     * And the tracker has:
     *    [A, D, C, B]
     *
     * If B, C, and D are copies of A - we will walk the tracker to the end
     * and select D, not B (see bug 583684).
     */
    uint32 firstCopy = InvalidIndex;
    FrameEntry *tos = tosFe();
    FrameEntry *bestFe = NULL;
    uint32 ncopies = 0;
    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;
        if (fe->isCopy() && fe->copyOf() == original) {
            if (firstCopy == InvalidIndex) {
                firstCopy = i;
                bestFe = fe;
            } else if (fe < bestFe) {
                bestFe = fe;
            }
            ncopies++;
        }
    }

    if (!ncopies) {
        JS_ASSERT(firstCopy == InvalidIndex);
        JS_ASSERT(!bestFe);
        original->copied = false;
        return NULL;
    }

    JS_ASSERT(firstCopy != InvalidIndex);
    JS_ASSERT(bestFe);

    /* Mark all extra copies as copies of the new backing index. */
    bestFe->setCopyOf(NULL);
    if (ncopies > 1) {
        bestFe->setCopied();
        for (uint32 i = firstCopy; i < tracker.nentries; i++) {
            FrameEntry *other = tracker[i];
            if (other >= tos || other == bestFe)
                continue;

            /* The original must be tracked before copies. */
            JS_ASSERT(other != original);

            if (!other->isCopy() || other->copyOf() != original)
                continue;

            other->setCopyOf(bestFe);

            /*
             * This is safe even though we're mutating during iteration. There
             * are two cases. The first is that both indexes are <= i, and thus
             * the swap will never be observed. The other case is we're placing
             * the other FE such that it will be observed later. Luckily,
             * copyOf() will return != original, so nothing will happen.
             */
            if (other->trackerIndex() < bestFe->trackerIndex())
                swapInTracker(bestFe, other);
        }
    } else {
        bestFe->setNotCopied();
    }

    FrameEntry *fe = bestFe;

    /*
     * Switch the new backing store to the old backing store. During
     * this process we also necessarily make sure the copy can be
     * synced.
     */
    if (!original->isTypeKnown()) {
        /*
         * If the copy is unsynced, and the original is in memory,
         * give the original a register. We do this below too; it's
         * okay if it's spilled.
         */
        if (original->type.inMemory() && !fe->type.synced())
            tempRegForType(original);

        fe->type.inherit(original->type);
        if (fe->type.inRegister())
            moveOwnership(fe->type.reg(), fe);
//......... some code omitted here .........
Example 6: avail
void
FrameState::sync(Assembler &masm, Uses uses) const
{
    /*
     * Keep track of free registers using a bitmask. If we have to drop into
     * syncFancy(), then this mask will help avoid eviction.
     */
    Registers avail(freeRegs);
    Registers temp(Registers::TempRegs);

    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);

        if (!fe->isCopy()) {
            /* Keep track of registers that can be clobbered. */
            if (fe->data.inRegister())
                avail.putReg(fe->data.reg());
            if (fe->type.inRegister())
                avail.putReg(fe->type.reg());

            /* Sync. */
            if (!fe->data.synced() && (fe->data.inRegister() || fe >= bottom)) {
                syncData(fe, address, masm);
                if (fe->isConstant())
                    continue;
            }
            if (!fe->type.synced() && (fe->type.inRegister() || fe >= bottom))
                syncType(fe, addressOf(fe), masm);
        } else if (fe >= bottom) {
            FrameEntry *backing = fe->copyOf();
            JS_ASSERT(backing != fe);
            JS_ASSERT(!backing->isConstant() && !fe->isConstant());

            /*
             * If the copy is backed by something not in a register, fall back
             * to a slower sync algorithm.
             */
            if ((!fe->type.synced() && !backing->type.inRegister()) ||
                (!fe->data.synced() && !backing->data.inRegister())) {
                syncFancy(masm, avail, i, bottom);
                return;
            }

            if (!fe->type.synced()) {
                /* :TODO: we can do better, the type is learned for all copies. */
                if (fe->isTypeKnown()) {
                    //JS_ASSERT(fe->getTypeTag() == backing->getTypeTag());
                    masm.storeTypeTag(ImmType(fe->getKnownType()), address);
                } else {
                    masm.storeTypeTag(backing->type.reg(), address);
                }
            }

            if (!fe->data.synced())
                masm.storePayload(backing->data.reg(), address);
        }
    }
}