This article collects typical usage examples of the C++ method FrameEntry::copyOf. If you are unsure what FrameEntry::copyOf does, how it is used, or would like to see it in context, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class, FrameEntry.
Five code examples of FrameEntry::copyOf are shown below, sorted by popularity by default.
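Before the examples, here is a minimal, self-contained sketch of the copy/backing relationship that copyOf() resolves. The Entry struct and its members are hypothetical stand-ins for illustration only; they are not SpiderMonkey's real FrameEntry API.

// Minimal sketch (hypothetical Entry type, not SpiderMonkey's FrameEntry):
// a "copy" entry points at a backing entry, and the backing entry is
// flagged as copied so the compiler knows someone depends on it.
#include <cassert>
#include <cstdio>

struct Entry {
    Entry *backing = nullptr;   // non-null => this entry is a copy
    bool copied = false;        // true => at least one copy points here

    bool isCopy() const { return backing != nullptr; }
    bool isCopied() const { return copied; }
    Entry *copyOf() const { return backing; }   // only meaningful if isCopy()

    void setCopyOf(Entry *e) { backing = e; }
    void setCopied() { copied = true; }
};

int main() {
    Entry original, copy;
    copy.setCopyOf(&original);
    original.setCopied();

    assert(copy.isCopy() && copy.copyOf() == &original);
    assert(original.isCopied() && !original.isCopy());
    std::printf("copy backed by original: %s\n",
                copy.copyOf() == &original ? "yes" : "no");
    return 0;
}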
Example 1: tosFe
void
FrameState::assertValidRegisterState() const
{
    Registers checkedFreeRegs;

    FrameEntry *tos = tosFe();
    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        JS_ASSERT(i == fe->trackerIndex());
        JS_ASSERT_IF(fe->isCopy(),
                     fe->trackerIndex() > fe->copyOf()->trackerIndex());
        JS_ASSERT_IF(fe->isCopy(), !fe->type.inRegister() && !fe->data.inRegister());
        JS_ASSERT_IF(fe->isCopy(), fe->copyOf() < tos);
        JS_ASSERT_IF(fe->isCopy(), fe->copyOf()->isCopied());

        if (fe->isCopy())
            continue;
        if (fe->type.inRegister()) {
            checkedFreeRegs.takeReg(fe->type.reg());
            JS_ASSERT(regstate[fe->type.reg()].fe == fe);
        }
        if (fe->data.inRegister()) {
            checkedFreeRegs.takeReg(fe->data.reg());
            JS_ASSERT(regstate[fe->data.reg()].fe == fe);
        }
    }

    JS_ASSERT(checkedFreeRegs == freeRegs);
}
Example 2: entryFor
void
FrameState::pushCopyOf(uint32 index)
{
    FrameEntry *backing = entryFor(index);
    FrameEntry *fe = rawPush();
    fe->resetUnsynced();
    if (backing->isConstant()) {
        fe->setConstant(Jsvalify(backing->getValue()));
    } else {
        if (backing->isTypeKnown())
            fe->setType(backing->getKnownType());
        else
            fe->type.invalidate();
        fe->isNumber = backing->isNumber;
        fe->data.invalidate();
        if (backing->isCopy()) {
            backing = backing->copyOf();
            fe->setCopyOf(backing);
        } else {
            fe->setCopyOf(backing);
            backing->setCopied();
        }

        /* Maintain tracker ordering guarantees for copies. */
        JS_ASSERT(backing->isCopied());
        if (fe->trackerIndex() < backing->trackerIndex())
            swapInTracker(fe, backing);
    }
}
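Note how the example above collapses copy chains: when the source entry is itself a copy, the new entry is made a copy of the original backing, never of another copy. Below is a minimal sketch of that rule, again using a hypothetical Entry type rather than the real FrameEntry.

// Sketch only (hypothetical types): pushCopyOf-style chain collapsing,
// so copyOf() always refers to the original backing entry.
#include <cassert>

struct Entry {
    Entry *backing = nullptr;
    bool copied = false;
    bool isCopy() const { return backing != nullptr; }
    Entry *copyOf() const { return backing; }
};

// Make 'fe' a copy of 'target', resolving 'target' to its backing first.
static void makeCopy(Entry &fe, Entry &target) {
    Entry *backing = &target;
    if (backing->isCopy())
        backing = backing->copyOf();   // collapse: copy of a copy -> original
    fe.backing = backing;
    backing->copied = true;
}

int main() {
    Entry a, b, c;
    makeCopy(b, a);   // b is a copy of a
    makeCopy(c, b);   // c asks to copy b, but ends up backed by a
    assert(c.copyOf() == &a);
    return 0;
}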
Example 3: local
void
FrameState::storeLocal(uint32 n, bool popGuaranteed, bool typeChange)
{
    FrameEntry *localFe = getLocal(n);
    bool cacheable = !eval && !escaping[n];

    if (!popGuaranteed && !cacheable) {
        JS_ASSERT_IF(base[localIndex(n)] && (!eval || n < script->nfixed),
                     entries[localIndex(n)].type.inMemory() &&
                     entries[localIndex(n)].data.inMemory());
        Address local(JSFrameReg, sizeof(JSStackFrame) + n * sizeof(Value));
        storeTo(peek(-1), local, false);
        forgetAllRegs(getLocal(n));
        localFe->resetSynced();
        return;
    }

    bool wasSynced = localFe->type.synced();

    /* Detect something like (x = x) which is a no-op. */
    FrameEntry *top = peek(-1);
    if (top->isCopy() && top->copyOf() == localFe) {
        JS_ASSERT(localFe->isCopied());
        return;
    }

    /* Completely invalidate the local variable. */
    if (localFe->isCopied()) {
        uncopy(localFe);
        if (!localFe->isCopied())
            forgetAllRegs(localFe);
    } else {
        forgetAllRegs(localFe);
    }

    localFe->resetUnsynced();

    /* Constants are easy to propagate. */
    if (top->isConstant()) {
        localFe->setCopyOf(NULL);
        localFe->setNotCopied();
        localFe->setConstant(Jsvalify(top->getValue()));
        return;
    }

    /*
     * When dealing with copies, there are two important invariants:
     *
     * 1) The backing store precedes all copies in the tracker.
     * 2) The backing store of a local is never a stack slot, UNLESS the local
     *    variable itself is a stack slot (blocks) that precedes the stack
     *    slot.
     *
     * If the top is a copy, and the second condition holds true, the local
     * can be rewritten as a copy of the original backing slot. If the first
     * condition does not hold, force it to hold by swapping in-place.
     */
    FrameEntry *backing = top;
    if (top->isCopy()) {
        backing = top->copyOf();
        JS_ASSERT(backing->trackerIndex() < top->trackerIndex());

        uint32 backingIndex = indexOfFe(backing);
        uint32 tol = uint32(spBase - base);
        if (backingIndex < tol || backingIndex < localIndex(n)) {
            /* local.idx < backing.idx means local cannot be a copy yet */
            if (localFe->trackerIndex() < backing->trackerIndex())
                swapInTracker(backing, localFe);
            localFe->setNotCopied();
            localFe->setCopyOf(backing);
            if (backing->isTypeKnown())
                localFe->setType(backing->getKnownType());
            else
                localFe->type.invalidate();
            localFe->data.invalidate();
            localFe->isNumber = backing->isNumber;
            return;
        }

        /*
         * If control flow lands here, then there was a bytecode sequence like
         *
         *  ENTERBLOCK 2
         *  GETLOCAL 1
         *  SETLOCAL 0
         *
         * The problem is slot N can't be backed by M if M could be popped
         * before N. We want a guarantee that when we pop M, even if it was
         * copied, it has no outstanding copies.
         *
         * Because of |let| expressions, it's kind of hard to really know
         * whether a region on the stack will be popped all at once. Bleh!
         *
         * This should be rare except in browser code (and maybe even then),
         * but even so there's a quick workaround. We take all copies of the
         * backing fe, and redirect them to be copies of the destination.
         */
        FrameEntry *tos = tosFe();
        for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
            FrameEntry *fe = tracker[i];
//......... rest of the code omitted here .........
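The truncated loop above implements the workaround described in the comment: every outstanding copy of the old backing entry is redirected to be a copy of the destination, so the old backing can be popped without leaving dangling copies behind. A simplified, hypothetical sketch of that idea over a plain vector (not the real tracker) follows.

// Hedged sketch (hypothetical types): redirect all copies of 'oldBacking'
// to 'newBacking', mirroring the workaround described in storeLocal.
#include <cassert>
#include <vector>

struct Entry {
    Entry *backing = nullptr;
    bool copied = false;
};

static void redirectCopies(std::vector<Entry*> &tracker,
                           Entry *oldBacking, Entry *newBacking) {
    for (Entry *fe : tracker) {
        if (fe->backing == oldBacking) {
            fe->backing = newBacking;     // fe is now a copy of the destination
            newBacking->copied = true;
        }
    }
    oldBacking->copied = false;           // no copies point at it any more
}

int main() {
    Entry a, b, dest;
    b.backing = &a;
    a.copied = true;

    std::vector<Entry*> tracker = { &a, &b, &dest };
    redirectCopies(tracker, &a, &dest);

    assert(b.backing == &dest && dest.copied && !a.copied);
    return 0;
}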
Example 4: avail
void
FrameState::sync(Assembler &masm, Uses uses) const
{
    /*
     * Keep track of free registers using a bitmask. If we have to drop into
     * syncFancy(), then this mask will help avoid eviction.
     */
    Registers avail(freeRegs);
    Registers temp(Registers::TempRegs);

    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);

        if (!fe->isCopy()) {
            /* Keep track of registers that can be clobbered. */
            if (fe->data.inRegister())
                avail.putReg(fe->data.reg());
            if (fe->type.inRegister())
                avail.putReg(fe->type.reg());

            /* Sync. */
            if (!fe->data.synced() && (fe->data.inRegister() || fe >= bottom)) {
                syncData(fe, address, masm);
                if (fe->isConstant())
                    continue;
            }
            if (!fe->type.synced() && (fe->type.inRegister() || fe >= bottom))
                syncType(fe, addressOf(fe), masm);
        } else if (fe >= bottom) {
            FrameEntry *backing = fe->copyOf();
            JS_ASSERT(backing != fe);
            JS_ASSERT(!backing->isConstant() && !fe->isConstant());

            /*
             * If the copy is backed by something not in a register, fall back
             * to a slower sync algorithm.
             */
            if ((!fe->type.synced() && !backing->type.inRegister()) ||
                (!fe->data.synced() && !backing->data.inRegister())) {
                syncFancy(masm, avail, i, bottom);
                return;
            }

            if (!fe->type.synced()) {
                /* :TODO: we can do better, the type is learned for all copies. */
                if (fe->isTypeKnown()) {
                    //JS_ASSERT(fe->getTypeTag() == backing->getTypeTag());
                    masm.storeTypeTag(ImmType(fe->getKnownType()), address);
                } else {
                    masm.storeTypeTag(backing->type.reg(), address);
                }
            } 

            if (!fe->data.synced())
                masm.storePayload(backing->data.reg(), address);
        }
    }
}
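Because a copy never owns registers (see the assertions in Example 1), sync() writes a copy's stack slot from its backing's registers, and only drops into syncFancy() when the backing is not in a register. The sketch below illustrates that fast path with stand-in Slot and Register types; it is an assumption-laden model, not the real Assembler interface.

// Sketch under stated assumptions (Slot, Register, and registerFile are
// stand-ins): a copy's slot is synced from its backing's register.
#include <cassert>

struct Register { int id = -1; bool valid() const { return id >= 0; } };

struct Slot {
    Slot *backing = nullptr;     // non-null => this slot is a copy
    Register dataReg;            // only a non-copy may hold a register
    int memory = 0;              // the "stack slot" in memory
    bool synced = false;
};

static int registerFile[8];      // fake register contents

static void syncSlot(Slot &s) {
    Slot *src = s.backing ? s.backing : &s;
    assert(src->dataReg.valid());             // slow path (syncFancy) omitted
    s.memory = registerFile[src->dataReg.id]; // "storePayload" equivalent
    s.synced = true;
}

int main() {
    registerFile[3] = 42;

    Slot backing;
    backing.dataReg.id = 3;

    Slot copy;
    copy.backing = &backing;

    syncSlot(copy);
    assert(copy.memory == 42 && copy.synced);
    return 0;
}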
Example 5: pinReg
void
FrameState::allocForBinary(FrameEntry *lhs, FrameEntry *rhs, JSOp op, BinaryAlloc &alloc,
                           bool needsResult)
{
    FrameEntry *backingLeft = lhs;
    FrameEntry *backingRight = rhs;

    if (backingLeft->isCopy())
        backingLeft = backingLeft->copyOf();
    if (backingRight->isCopy())
        backingRight = backingRight->copyOf();

    /*
     * For each remat piece of both FEs, if a register is assigned, get it now
     * and pin it. This is safe - constants and known types will be avoided.
     */
    if (AllocHelper(backingLeft->type, alloc.lhsType))
        pinReg(alloc.lhsType.reg());
    if (AllocHelper(backingLeft->data, alloc.lhsData))
        pinReg(alloc.lhsData.reg());
    if (AllocHelper(backingRight->type, alloc.rhsType))
        pinReg(alloc.rhsType.reg());
    if (AllocHelper(backingRight->data, alloc.rhsData))
        pinReg(alloc.rhsData.reg());

    /* For each type without a register, give it a register if needed. */
    if (!alloc.lhsType.isSet() && backingLeft->type.inMemory()) {
        alloc.lhsType = tempRegForType(lhs);
        pinReg(alloc.lhsType.reg());
    }
    if (!alloc.rhsType.isSet() && backingRight->type.inMemory()) {
        alloc.rhsType = tempRegForType(rhs);
        pinReg(alloc.rhsType.reg());
    }

    bool commu;
    switch (op) {
      case JSOP_EQ:
      case JSOP_GT:
      case JSOP_GE:
      case JSOP_LT:
      case JSOP_LE:
        /* fall through */
      case JSOP_ADD:
      case JSOP_MUL:
      case JSOP_SUB:
        commu = true;
        break;

      case JSOP_DIV:
        commu = false;
        break;

      default:
        JS_NOT_REACHED("unknown op");
        return;
    }

    /*
     * Data is a little more complicated. If the op is MUL, not all CPUs
     * have multiplication on immediates, so a register is needed. Also,
     * if the op is not commutative, the LHS _must_ be in a register.
     */
    JS_ASSERT_IF(lhs->isConstant(), !rhs->isConstant());
    JS_ASSERT_IF(rhs->isConstant(), !lhs->isConstant());

    if (!alloc.lhsData.isSet()) {
        if (backingLeft->data.inMemory()) {
            alloc.lhsData = tempRegForData(lhs);
            pinReg(alloc.lhsData.reg());
        } else if (op == JSOP_MUL || !commu) {
            JS_ASSERT(lhs->isConstant());
            alloc.lhsData = allocReg();
            alloc.extraFree = alloc.lhsData;
            masm.move(Imm32(lhs->getValue().toInt32()), alloc.lhsData.reg());
        }
    }
    if (!alloc.rhsData.isSet()) {
        if (backingRight->data.inMemory()) {
            alloc.rhsData = tempRegForData(rhs);
            pinReg(alloc.rhsData.reg());
        } else if (op == JSOP_MUL) {
            JS_ASSERT(rhs->isConstant());
            alloc.rhsData = allocReg();
            alloc.extraFree = alloc.rhsData;
            masm.move(Imm32(rhs->getValue().toInt32()), alloc.rhsData.reg());
        }
    }

    alloc.lhsNeedsRemat = false;
    alloc.rhsNeedsRemat = false;

    if (!needsResult)
        goto skip;

    /*
     * Now a result register is needed. It must contain a mutable copy of the
     * LHS. For commutative operations, we can opt to use the RHS instead. At
     * this point, if for some reason either must be in a register, that has
     * already been guaranteed at this point.
//......... rest of the code omitted here .........
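The switch above classifies operations by commutativity: the comparisons, ADD, MUL, and SUB are treated as commutative by this allocator, while DIV forces the LHS into a register; MUL additionally requires constants to be materialized because not every CPU supports multiplication by an immediate. Below is a hypothetical sketch of that decision, with a mock Op enum standing in for JSOp; it only mirrors the rule, not the real allocator.

// Illustrative sketch (mock Op enum, not the real JSOp values): the
// commutativity rule allocForBinary applies when deciding whether a
// constant operand must be moved into a register.
#include <cassert>

enum class Op { Eq, Gt, Ge, Lt, Le, Add, Mul, Sub, Div };

static bool isCommutative(Op op) {
    switch (op) {
      case Op::Eq: case Op::Gt: case Op::Ge: case Op::Lt: case Op::Le:
      case Op::Add: case Op::Mul: case Op::Sub:
        return true;              // treated as commutative by this allocator
      case Op::Div:
        return false;             // LHS must stay in a register
    }
    return false;
}

// MUL has no reg-imm form on some targets, so constants get materialized;
// a constant LHS also needs a register when the op is not commutative.
static bool constantNeedsRegister(Op op, bool operandIsLhs) {
    return op == Op::Mul || (operandIsLhs && !isCommutative(op));
}

int main() {
    assert(isCommutative(Op::Add) && !isCommutative(Op::Div));
    assert(constantNeedsRegister(Op::Mul, /*operandIsLhs=*/false));
    assert(constantNeedsRegister(Op::Div, /*operandIsLhs=*/true));
    return 0;
}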