This article collects typical usage examples of the C++ FTRACE function. If you have been wondering how FTRACE is actually used in C++ — what a real call looks like, and in what kinds of code it appears — the hand-picked examples below may help.
Fifteen FTRACE code examples are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your ratings help the system recommend better C++ examples.
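To make the examples easier to read in isolation, here is a minimal sketch of an FTRACE-style tracing macro. This is an illustration only: HHVM's real FTRACE is tied to named trace modules with runtime-configurable levels, and the global g_traceLevel variable here is hypothetical. The sketch only shows the shape of the calls you will see below — a verbosity level followed by a folly::format-style format string and its arguments.

// Minimal FTRACE-style macro (illustrative sketch, not HHVM's definition).
#include <folly/Format.h>
#include <cstdio>

static int g_traceLevel = 1;  // hypothetical global trace level

#define FTRACE(level, ...)                                          \
  do {                                                              \
    if ((level) <= g_traceLevel) {                                  \
      std::fputs(folly::format(__VA_ARGS__).str().c_str(), stderr); \
    }                                                               \
  } while (0)

// Usage, mirroring the calls in the examples below:
//   FTRACE(2, "block #{}\n", blockId);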
Example 1: SrcKey
bool InliningDecider::shouldInline(const Func* callee,
const RegionDesc& region,
uint32_t maxTotalCost) {
auto sk = region.empty() ? SrcKey() : region.start();
assertx(callee);
assertx(sk.func() == callee);
// Tracing return lambdas.
auto refuse = [&] (const char* why) {
FTRACE(1, "shouldInline: rejecting callee region: {}", show(region));
return traceRefusal(m_topFunc, callee, why);
};
auto accept = [&, this] (const char* kind) {
FTRACE(1, "InliningDecider: inlining {}() <- {}()\t<reason: {}>\n",
m_topFunc->fullName()->data(), callee->fullName()->data(), kind);
return true;
};
// Check inlining depths.
if (m_callDepth + 1 >= RuntimeOption::EvalHHIRInliningMaxDepth) {
return refuse("inlining call depth limit exceeded");
}
if (m_stackDepth + callee->maxStackCells() >= kStackCheckLeafPadding) {
return refuse("inlining stack depth limit exceeded");
}
// Even if the func contains NativeImpl we may have broken the trace before
// we hit it.
auto containsNativeImpl = [&] {
for (auto block : region.blocks()) {
if (!block->empty() && block->last().op() == OpNativeImpl) return true;
}
return false;
};
// Try to inline CPP builtin functions.
// The NativeImpl opcode may appear later in the function because of Asserts
// generated in hhbbc
if (callee->isCPPBuiltin() && containsNativeImpl()) {
if (isInlinableCPPBuiltin(callee)) {
return accept("inlinable CPP builtin");
}
return refuse("non-inlinable CPP builtin");
}
// If the function may use a VarEnv (which is stored in the ActRec) or may be
// variadic, we restrict inlined callees to certain whitelisted instructions
// which we know won't actually require these features.
const bool needsCheckVVSafe = callee->attrs() & AttrMayUseVV;
int numRets = 0;
int numExits = 0;
// Iterate through the region, checking its suitability for inlining.
for (auto const& block : region.blocks()) {
sk = block->start();
for (auto i = 0, n = block->length(); i < n; ++i, sk.advance()) {
auto op = sk.op();
// We don't allow inlined functions in the region. The client is
// expected to disable inlining for the region it gives us to peek.
if (sk.func() != callee) {
return refuse("got region with inlined calls");
}
// Restrict to VV-safe opcodes if necessary.
if (needsCheckVVSafe && !isInliningVVSafe(op)) {
return refuse(folly::format("{} may use dynamic environment",
opcodeToName(op)).str().c_str());
}
// Count the returns.
if (isReturnish(op)) {
if (++numRets > RuntimeOption::EvalHHIRInliningMaxReturns) {
return refuse("region has too many returns");
}
continue;
}
// We can't inline FCallArray. XXX: Why?
if (op == Op::FCallArray) {
return refuse("can't inline FCallArray");
}
}
if (region.isExit(block->id())) {
if (++numExits > RuntimeOption::EvalHHIRInliningMaxBindJmps + numRets) {
return refuse("region has too many non return exits");
}
}
}
// Refuse if the cost exceeds our thresholds.
// We measure the cost of inlining each callstack and stop when it exceeds a
// certain threshold. (Note that we do not measure the total cost of all the
// inlined calls for a given caller---just the cost of each nested stack.)
const int maxCost = maxTotalCost - m_cost;
const int cost = computeCost(region);
//......... some code omitted here .........
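Example 1 relies on a small idiom worth calling out: the refuse/accept lambdas each emit a trace line and return the boolean result, so every early return documents why inlining was rejected or allowed. Below is a stripped-down, self-contained sketch of the same pattern; the function name and the cost check are illustrative, not HHVM's.

#include <cstdio>

bool shouldDoThing(int cost, int limit) {
  auto refuse = [&](const char* why) {
    std::fprintf(stderr, "shouldDoThing: refusing <reason: %s>\n", why);
    return false;
  };
  auto accept = [&](const char* why) {
    std::fprintf(stderr, "shouldDoThing: accepting <reason: %s>\n", why);
    return true;
  };
  if (cost > limit) return refuse("cost exceeds limit");
  return accept("within budget");
}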
Example 2: do_analyze_collect
FuncAnalysis do_analyze_collect(const Index& index,
Context const inputCtx,
CollectedInfo& collect,
ClassAnalysis* clsAnalysis,
const std::vector<Type>* knownArgs) {
auto const ctx = adjust_closure_context(inputCtx);
FuncAnalysis ai(ctx);
Trace::Bump bumper{Trace::hhbbc, kTraceFuncBump,
is_trace_function(ctx.cls, ctx.func)};
FTRACE(2, "{:-^70}\n-- {}\n", "Analyze", show(ctx));
/*
* Set of RPO ids that still need to be visited.
*
* Initially, we need each entry block in this list. As we visit
* blocks, we propagate states to their successors and across their
* back edges---when state merges cause a change to the block
* stateIn, we will add it to this queue so it gets visited again.
*/
auto incompleteQ = prepare_incompleteQ(index, ai, clsAnalysis, knownArgs);
/*
* There are potentially infinitely growing types when we're using
* union_of to merge states, so occasionally we need to apply a
* widening operator.
*
* Currently this is done by having a straightforward heuristic: if
* you visit a block too many times, we'll start doing all the
* merges with the widening operator until we've had a chance to
* visit the block again. We must then continue iterating in case
* the actual fixed point is higher than the result of widening.
*
* Termination is guaranteed because the widening operator has only
* finite chains in the type lattice.
*/
auto nonWideVisits = std::vector<uint32_t>(ctx.func->nextBlockId);
// For debugging, count how many times basic blocks get interpreted.
auto interp_counter = uint32_t{0};
/*
* Iterate until a fixed point.
*
* Each time a stateIn for a block changes, we re-insert the block's
* rpo ID in incompleteQ. Since incompleteQ is ordered, we'll
* always visit blocks with earlier RPO ids first, which hopefully
* means fewer iterations.
*/
while (!incompleteQ.empty()) {
auto const blk = ai.rpoBlocks[incompleteQ.pop()];
if (nonWideVisits[blk->id]++ > options.analyzeFuncWideningLimit) {
nonWideVisits[blk->id] = 0;
}
FTRACE(2, "block #{}\nin {}{}", blk->id,
state_string(*ctx.func, ai.bdata[blk->id].stateIn),
property_state_string(collect.props));
++interp_counter;
auto propagate = [&] (php::Block& target, const State& st) {
auto const needsWiden =
nonWideVisits[target.id] >= options.analyzeFuncWideningLimit;
// We haven't optimized the widening operator much, because it
// doesn't happen in practice right now. We want to know when
// it starts happening:
if (needsWiden) {
std::fprintf(stderr, "widening in %s on %s\n",
ctx.unit->filename->data(),
ctx.func->name->data());
}
FTRACE(2, " {}-> {}\n", needsWiden ? "widening " : "", target.id);
FTRACE(4, "target old {}",
state_string(*ctx.func, ai.bdata[target.id].stateIn));
auto const changed =
needsWiden ? widen_into(ai.bdata[target.id].stateIn, st)
: merge_into(ai.bdata[target.id].stateIn, st);
if (changed) {
incompleteQ.push(rpoId(ai, &target));
}
FTRACE(4, "target new {}",
state_string(*ctx.func, ai.bdata[target.id].stateIn));
};
auto stateOut = ai.bdata[blk->id].stateIn;
auto interp = Interp { index, ctx, collect, blk, stateOut };
auto flags = run(interp, propagate);
if (flags.returned) {
ai.inferredReturn = union_of(std::move(ai.inferredReturn),
std::move(*flags.returned));
}
}
ai.closureUseTypes = std::move(collect.closureUseTypes);
if (ctx.func->isGenerator) {
//......... some code omitted here .........
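The comment block in Example 2 describes a classic worklist fixed-point computation with widening. The toy program below walks through the same control flow on a deliberately simple lattice: states are integer upper bounds, the join is max, and widening jumps straight to a large "top" value once a block has been visited too often. Everything here is illustrative; HHBBC's real State, merge_into, and widen_into are far richer.

#include <algorithm>
#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

int main() {
  constexpr int kWideningLimit = 5;
  constexpr int kTop = 1000000;
  // Toy CFG: 0 -> 1 -> 2, plus a back edge 2 -> 1 (a loop).
  std::vector<std::vector<int>> succs = {{1}, {2}, {1}};
  std::vector<int> stateIn(succs.size(), 0);
  std::vector<int> visits(succs.size(), 0);
  // Ordered queue: lower block ids are visited first, like the RPO queue.
  std::priority_queue<int, std::vector<int>, std::greater<int>> incompleteQ;
  incompleteQ.push(0);
  while (!incompleteQ.empty()) {
    int blk = incompleteQ.top();
    incompleteQ.pop();
    bool widen = ++visits[blk] > kWideningLimit;
    int out = stateIn[blk] + 1;  // toy "transfer function": bump the bound
    for (int target : succs[blk]) {
      int merged = widen ? kTop : std::max(stateIn[target], out);
      if (merged != stateIn[target]) {   // state changed: revisit the target
        stateIn[target] = merged;
        incompleteQ.push(target);
      }
    }
  }
  for (size_t i = 0; i < stateIn.size(); ++i) {
    std::printf("block %zu: in = %d\n", i, stateIn[i]);
  }
  return 0;
}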
Example 3: selectHotTrace
RegionDescPtr selectHotTrace(TransID triggerId,
const ProfData* profData,
TransCFG& cfg,
TransIDSet& selectedSet,
TransIDVec* selectedVec) {
auto region = std::make_shared<RegionDesc>();
TransID tid = triggerId;
TransID prevId = kInvalidTransID;
selectedSet.clear();
if (selectedVec) selectedVec->clear();
PostConditions accumPostConds;
// Maps BlockIds to the set of BC offsets for its successor blocks.
// Used to prevent multiple successors with the same SrcKey for now.
// This can go away once task #4157613 is done.
hphp_hash_map<RegionDesc::BlockId, SrcKeySet> succSKSet;
// Maps from BlockIds to accumulated post conditions for that block.
// Used to determine if we can add branch-over edges by checking the
// pre-conditions of the successor block.
hphp_hash_map<RegionDesc::BlockId, PostConditions> blockPostConds;
while (!selectedSet.count(tid)) {
RegionDescPtr blockRegion = profData->transRegion(tid);
if (blockRegion == nullptr) break;
// If the debugger is attached, only allow single-block regions.
if (prevId != kInvalidTransID && isDebuggerAttachedProcess()) {
FTRACE(2, "selectHotTrace: breaking region at Translation {} "
"because of debugger is attached\n", tid);
break;
}
// Break if block is not the first and requires reffiness checks.
// Task #2589970: fix translateRegion to support mid-region reffiness checks
if (prevId != kInvalidTransID) {
auto nRefDeps = blockRegion->entry()->reffinessPreds().size();
if (nRefDeps > 0) {
FTRACE(2, "selectHotTrace: breaking region because of refDeps ({}) at "
"Translation {}\n", nRefDeps, tid);
break;
}
}
// Break if block is not the first and it corresponds to the main
// function body entry. This is to prevent creating multiple
// large regions containing the function body (starting at various
// DV funclets).
if (prevId != kInvalidTransID) {
const Func* func = profData->transFunc(tid);
Offset bcOffset = profData->transStartBcOff(tid);
if (func->base() == bcOffset) {
FTRACE(2, "selectHotTrace: breaking region because reached the main "
"function body entry at Translation {} (BC offset {})\n",
tid, bcOffset);
break;
}
}
if (prevId != kInvalidTransID) {
auto sk = profData->transSrcKey(tid);
if (profData->optimized(sk)) {
FTRACE(2, "selectHotTrace: breaking region because next sk already "
"optimized, for Translation {}\n", tid);
break;
}
}
// Break trace if translation tid cannot follow the execution of
// the entire translation prevId. This can only happen if the
// execution of prevId takes a side exit that leads to the
// execution of tid.
if (prevId != kInvalidTransID) {
Op* lastInstr = profData->transLastInstr(prevId);
const Unit* unit = profData->transFunc(prevId)->unit();
OffsetSet succOffs = instrSuccOffsets(lastInstr, unit);
if (!succOffs.count(profData->transSrcKey(tid).offset())) {
if (HPHP::Trace::moduleEnabled(HPHP::Trace::pgo, 2)) {
FTRACE(2, "selectHotTrace: WARNING: Breaking region @: {}\n",
show(*region));
FTRACE(2, "selectHotTrace: next translation selected: tid = {}\n{}\n",
tid, show(*blockRegion));
FTRACE(2, "\nsuccOffs = {}\n", folly::join(", ", succOffs));
}
break;
}
}
bool hasPredBlock = !region->empty();
RegionDesc::BlockId predBlockId = (hasPredBlock ?
region->blocks().back().get()->id() : 0);
auto const& newFirstBlock = blockRegion->entry();
auto newFirstBlockId = newFirstBlock->id();
auto newFirstBlockSk = newFirstBlock->start();
auto newLastBlockId = blockRegion->blocks().back()->id();
// Make sure we don't end up with multiple successors for the same
// SrcKey. Task #4157613 will allow the following check to go away.
// This needs to be done before we insert blockRegion into region,
//......... some code omitted here .........
Example 4: FTRACE
void FrameState::update(const IRInstruction* inst) {
FTRACE(3, "FrameState::update processing {}\n", *inst);
if (auto* taken = inst->taken()) {
// When we're building the IR, we append a conditional jump after
// generating its target block: see emitJmpCondHelper, where we
// call makeExit() before gen(JmpZero). It doesn't make sense to
// update the target block state at this point, so don't. The
// state doesn't have this problem during optimization passes,
// because we'll always process the jump before the target block.
if (!m_building || taken->empty()) save(taken);
}
auto const opc = inst->op();
getLocalEffects(inst, *this);
switch (opc) {
case DefInlineFP: trackDefInlineFP(inst); break;
case InlineReturn: trackInlineReturn(inst); break;
case Call:
m_spValue = inst->dst();
m_frameSpansCall = true;
// A call pops the ActRec and pushes a return value.
m_spOffset -= kNumActRecCells;
m_spOffset += 1;
assert(m_spOffset >= 0);
clearCse();
break;
case CallArray:
m_spValue = inst->dst();
m_frameSpansCall = true;
// A CallArray pops the ActRec and an array arg, and pushes a return value.
m_spOffset -= kNumActRecCells;
assert(m_spOffset >= 0);
clearCse();
break;
case ContEnter:
clearCse();
break;
case DefFP:
case FreeActRec:
m_fpValue = inst->dst();
break;
case ReDefResumableSP:
m_spValue = inst->dst();
break;
case ReDefSP:
m_spValue = inst->dst();
m_spOffset = inst->extra<ReDefSP>()->spOffset;
break;
case DefInlineSP:
case DefSP:
m_spValue = inst->dst();
m_spOffset = inst->extra<StackOffset>()->offset;
break;
case AssertStk:
case CastStk:
case CoerceStk:
case CheckStk:
case GuardStk:
case ExceptionBarrier:
m_spValue = inst->dst();
break;
case SpillStack: {
m_spValue = inst->dst();
// Push the spilled values but adjust for the popped values
int64_t stackAdjustment = inst->src(1)->intVal();
m_spOffset -= stackAdjustment;
m_spOffset += spillValueCells(inst);
break;
}
case SpillFrame:
case CufIterSpillFrame:
m_spValue = inst->dst();
m_spOffset += kNumActRecCells;
break;
case InterpOne:
case InterpOneCF: {
m_spValue = inst->dst();
auto const& extra = *inst->extra<InterpOneData>();
int64_t stackAdjustment = extra.cellsPopped - extra.cellsPushed;
// push the return value if any and adjust for the popped values
m_spOffset -= stackAdjustment;
break;
}
case AssertLoc:
case GuardLoc:
//......... some code omitted here .........
Example 5: find_directory
// fill out the icon with the stop symbol from app_server
void
ConflictView::_FillSavedIcon()
{
// return if the fSavedIcon has already been filled out
if (fSavedIcon != NULL && fSavedIcon->InitCheck() == B_OK)
return;
BPath path;
status_t status = find_directory(B_BEOS_SERVERS_DIRECTORY, &path);
if (status < B_OK) {
FTRACE((stderr,
"_FillWarningIcon() - find_directory failed: %s\n",
strerror(status)));
delete fSavedIcon;
fSavedIcon = NULL;
return;
}
path.Append("app_server");
BFile file;
status = file.SetTo(path.Path(), B_READ_ONLY);
if (status < B_OK) {
FTRACE((stderr,
"_FillWarningIcon() - BFile init failed: %s\n",
strerror(status)));
delete fSavedIcon;
fSavedIcon = NULL;
return;
}
BResources resources;
status = resources.SetTo(&file);
if (status < B_OK) {
FTRACE((stderr,
"_WarningIcon() - BResources init failed: %s\n",
strerror(status)));
delete fSavedIcon;
fSavedIcon = NULL;
return;
}
// Allocate the fSavedIcon bitmap
fSavedIcon = new(std::nothrow) BBitmap(BRect(0, 0, 15, 15), 0, B_RGBA32);
if (fSavedIcon == NULL || fSavedIcon->InitCheck() < B_OK) {
FTRACE((stderr, "_WarningIcon() - No memory for warning bitmap\n"));
delete fSavedIcon;
fSavedIcon = NULL;
return;
}
// Load the raw stop icon data
size_t size = 0;
const uint8* rawIcon;
rawIcon = (const uint8*)resources.LoadResource(B_VECTOR_ICON_TYPE,
"stop", &size);
// load vector warning icon into fSavedIcon
if (rawIcon == NULL
|| BIconUtils::GetVectorIcon(rawIcon, size, fSavedIcon) < B_OK) {
delete fSavedIcon;
fSavedIcon = NULL;
}
}
Example 6: CActive
/*
-------------------------------------------------------------------------------
Class: CSimpleTimeout
Method: CSimpleTimeout
Description: Default constructor
C++ default constructor can NOT contain any code, that
might leave.
Parameters: None
Return Values: None
Errors/Exceptions: None
Status: Approved
-------------------------------------------------------------------------------
*/
CSimpleTimeout::CSimpleTimeout() : CActive (CActive::EPriorityStandard)
{
FTRACE(FPrint(_L("CSimpleTimeout::CSimpleTimeout")));
}
Example 7: FTRACE
/*
* reoptimize() runs a trace through a second pass of TraceBuilder
* optimizations, like this:
*
* reset state.
* move all blocks to a temporary list.
* compute immediate dominators.
* for each block in trace order:
* if we have a snapshot state for this block:
* clear cse entries that don't dominate this block.
* use snapshot state.
* move all instructions to a temporary list.
* for each instruction:
* optimizeWork - do CSE and simplify again
* if not simplified:
* append existing instruction and update state.
* else:
* if the instruction has a result, insert a mov from the
* simplified tmp to the original tmp and discard the instruction.
* if the last conditional branch was turned into a jump, remove the
* fall-through edge to the next block.
*/
void TraceBuilder::reoptimize() {
FTRACE(5, "ReOptimize:vvvvvvvvvvvvvvvvvvvv\n");
SCOPE_EXIT { FTRACE(5, "ReOptimize:^^^^^^^^^^^^^^^^^^^^\n"); };
assert(m_curTrace->isMain());
assert(m_savedTraces.empty());
m_state.setEnableCse(RuntimeOption::EvalHHIRCse);
m_enableSimplification = RuntimeOption::EvalHHIRSimplification;
if (!m_state.enableCse() && !m_enableSimplification) return;
always_assert(!m_inReoptimize);
m_inReoptimize = true;
BlockList sortedBlocks = rpoSortCfg(m_unit);
auto const idoms = findDominators(m_unit, sortedBlocks);
m_state.clear();
auto blocks = std::move(m_curTrace->blocks());
assert(m_curTrace->blocks().empty());
while (!blocks.empty()) {
Block* block = blocks.front();
blocks.pop_front();
assert(block->trace() == m_curTrace);
FTRACE(5, "Block: {}\n", block->id());
assert(m_curTrace->isMain());
m_state.startBlock(block);
m_curTrace->push_back(block);
auto instructions = std::move(block->instrs());
assert(block->empty());
while (!instructions.empty()) {
auto *inst = &instructions.front();
instructions.pop_front();
m_state.setMarker(inst->marker());
// merging state looks at the current marker, and optimizeWork
// below may create new instructions. Use the marker from this
// instruction.
assert(inst->marker().valid());
setMarker(inst->marker());
auto const tmp = optimizeWork(inst, idoms); // Can generate new instrs!
if (!tmp) {
// Could not optimize; keep the old instruction
appendInstruction(inst, block);
m_state.update(inst);
continue;
}
SSATmp* dst = inst->dst();
if (dst->type() != Type::None && dst != tmp) {
// The result of optimization has a different destination than the inst.
// Generate a mov(tmp->dst) to get result into dst. If we get here then
// assume the last instruction in the block isn't a guard. If it was,
// we would have to insert the mov on the fall-through edge.
assert(block->empty() || !block->back().isBlockEnd());
IRInstruction* mov = m_unit.mov(dst, tmp, inst->marker());
appendInstruction(mov, block);
m_state.update(mov);
}
// Not re-adding inst; remove the inst->taken edge
if (inst->taken()) inst->setTaken(nullptr);
}
if (block->empty()) {
// If all the instructions in the block were optimized away, remove it
// from the trace.
auto it = m_curTrace->blocks().end();
--it;
assert(*it == block);
m_curTrace->unlink(it);
} else {
if (block->back().isTerminal()) {
// Could have converted a conditional branch to Jmp; clear next.
block->setNext(nullptr);
}
m_state.finishBlock(block);
}
}
//......... some code omitted here .........
Example 8: FTRACE
void IRTranslator::interpretInstr(const NormalizedInstruction& i) {
FTRACE(5, "HHIR: BC Instr {}\n", i.toString());
m_hhbcTrans.emitInterpOne(i);
}
Example 9: shouldIRInline
bool shouldIRInline(const Func* caller, const Func* callee, RegionIter& iter) {
if (!RuntimeOption::EvalHHIREnableGenTimeInlining) {
return false;
}
if (arch() == Arch::ARM) {
// TODO(#3331014): hack until more ARM codegen is working.
return false;
}
if (caller->isPseudoMain()) {
// TODO(#4238160): Hack inlining into pseudomain callsites is still buggy
return false;
}
auto refuse = [&](const char* why) -> bool {
FTRACE(1, "shouldIRInline: refusing {} <reason: {}> [NI = {}]\n",
callee->fullName()->data(), why,
iter.finished() ? "<end>" : iter.sk().showInst());
return false;
};
auto accept = [&](const char* kind) -> bool {
FTRACE(1, "shouldIRInline: inlining {} <kind: {}>\n",
callee->fullName()->data(), kind);
return true;
};
if (callee->numIterators() != 0) {
return refuse("iterators");
}
if (callee->isMagic() || Func::isSpecial(callee->name())) {
return refuse("special or magic function");
}
if (callee->attrs() & AttrMayUseVV) {
return refuse("may use dynamic environment");
}
if (callee->isResumable()) {
return refuse("resumables");
}
if (callee->numSlotsInFrame() + callee->maxStackCells() >=
kStackCheckLeafPadding) {
return refuse("function stack depth too deep");
}
////////////
assert(!iter.finished() && "shouldIRInline given empty region");
bool hotCallingCold = !(callee->attrs() & AttrHot) &&
(caller->attrs() & AttrHot);
uint64_t cost = 0;
int inlineDepth = 0;
Op op = OpLowInvalid;
smart::vector<const Func*> funcs;
const Func* func = callee;
funcs.push_back(func);
for (; !iter.finished(); iter.advance()) {
// If func has changed after an FCall, we've started an inlined call. This
// will have to change when we support inlining recursive calls.
if (func != iter.sk().func()) {
assert(isRet(op) || op == Op::FCall || op == Op::FCallD);
if (op == Op::FCall || op == Op::FCallD) {
funcs.push_back(iter.sk().func());
int totalDepth = 0;
for (auto* f : funcs) {
totalDepth += f->numSlotsInFrame() + f->maxStackCells();
}
if (totalDepth >= kStackCheckLeafPadding) {
return refuse("stack too deep after nested inlining");
}
++inlineDepth;
}
}
op = iter.sk().op();
func = iter.sk().func();
// If we hit a RetC/V while inlining, leave that level and
// continue. Otherwise, accept the tracelet.
if (isRet(op)) {
if (inlineDepth > 0) {
--inlineDepth;
funcs.pop_back();
continue;
} else {
assert(inlineDepth == 0);
return accept("entire function fits in one region");
}
}
if (op == Op::FCallArray) return refuse("FCallArray");
// These opcodes don't indicate any additional work in the callee,
// so they shouldn't count toward the inlining cost.
if (op == Op::AssertRATL || op == Op::AssertRATStk) {
continue;
}
cost += 1;
// Check for an immediate vector, and if it's present add its size to the
// cost.
auto const pc = reinterpret_cast<const Op*>(iter.sk().pc());
//......... some code omitted here .........
Example 10: sortRegion
/**
* Sorts the regions vector in a linear order to be used for
* translation. The goal is to obtain an order that improves locality
* when the function is executed.
*/
static void sortRegion(RegionVec& regions,
const Func* func,
const TransCFG& cfg,
const ProfData* profData,
const TransIDToRegionMap& headToRegion,
const RegionToTransIDsMap& regionToTransIds) {
RegionVec sorted;
RegionSet selected;
if (regions.size() == 0) return;
// First, pick the region starting at the lowest bytecode offset.
// This will normally correspond to the main function entry (for
// normal, regular bytecode), but it may not be for irregular
// functions written in hhas (like array_map and array_filter). If
there are multiple regions starting at the lowest bytecode offset,
// pick the one with the largest profile weight.
RegionDescPtr entryRegion = nullptr;
int64_t maxEntryWeight = -1;
Offset lowestOffset = kInvalidOffset;
for (const auto& pair : regionToTransIds) {
auto r = pair.first;
auto& tids = pair.second;
TransID firstTid = tids[0];
Offset firstOffset = profData->transSrcKey(firstTid).offset();
int64_t weight = cfg.weight(firstTid);
if (lowestOffset == kInvalidOffset || firstOffset < lowestOffset ||
(firstOffset == lowestOffset && weight > maxEntryWeight)) {
entryRegion = r;
maxEntryWeight = weight;
lowestOffset = firstOffset;
}
}
assert(entryRegion);
sorted.push_back(entryRegion);
selected.insert(entryRegion);
RegionDescPtr region = entryRegion;
// Select the remaining regions, iteratively picking the most likely
// region to execute next.
for (auto i = 1; i < regions.size(); i++) {
int64_t maxWeight = -1;
int64_t maxHeadWeight = -1;
RegionDescPtr bestNext = nullptr;
auto regionTransIds = getRegionTransIDVec(regionToTransIds, region);
for (auto next : regions) {
if (setContains(selected, next)) continue;
auto nextTransIds = getRegionTransIDVec(regionToTransIds, next);
int64_t weight = interRegionWeight(regionTransIds, nextTransIds[0], cfg);
int64_t headWeight = cfg.weight(nextTransIds[0]);
if ((weight > maxWeight) ||
(weight == maxWeight && headWeight > maxHeadWeight)) {
maxWeight = weight;
maxHeadWeight = headWeight;
bestNext = next;
}
}
assert(bestNext);
sorted.push_back(bestNext);
selected.insert(bestNext);
region = bestNext;
}
assert(sorted.size() == regions.size());
regions = sorted;
if (debug && Trace::moduleEnabled(HPHP::Trace::pgo, 5)) {
for (size_t i = 0; i < regions.size(); i++) {
auto r = regions[i];
auto tids = getRegionTransIDVec(regionToTransIds, r);
std::string transIds = folly::join(", ", tids);
FTRACE(6, "sortRegion: region[{}]: {}\n", i, transIds);
}
}
}
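The second loop of sortRegion is a greedy "pick the most likely successor next" ordering. The sketch below isolates that idea with placeholder types: weight(cur, next) stands in for interRegionWeight, and regions are plain integer ids. It is a simplified illustration (ties on weight are ignored), not the actual HHVM helper.

#include <functional>
#include <set>
#include <vector>

std::vector<int> greedyOrder(int entry, int numRegions,
                             const std::function<long(int, int)>& weight) {
  std::vector<int> sorted{entry};
  std::set<int> selected{entry};
  int cur = entry;
  while (static_cast<int>(sorted.size()) < numRegions) {
    long maxWeight = -1;
    int bestNext = -1;
    for (int next = 0; next < numRegions; ++next) {
      if (selected.count(next)) continue;
      long w = weight(cur, next);
      if (w > maxWeight) {
        maxWeight = w;
        bestNext = next;
      }
    }
    sorted.push_back(bestNext);   // bestNext is always found here: some region
    selected.insert(bestNext);    // remains unselected while the loop runs
    cur = bestNext;
  }
  return sorted;
}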
Example 11: regionizeFunc
/**
* Regionize a func, so that each node and each arc in its TransCFG is
* "covered". A node is covered if any region contains it. An arc T1->T2
* is covered if either:
*
* a) T1 and T2 are in the same region R and T2 immediately follows
* T1 in R.
* b) T2 is the head (first translation) of a region.
*
* Basic algorithm:
*
* 1) sort nodes in decreasing weight order
* 2) for each node N:
* 2.1) if N and all its incoming arcs are covered, then continue
* 2.2) select a region starting at this node and mark nodes/arcs as
* covered appropriately
*/
void regionizeFunc(const Func* func,
JIT::TranslatorX64* tx64,
RegionVec& regions) {
assert(RuntimeOption::EvalJitPGO);
FuncId funcId = func->getFuncId();
ProfData* profData = tx64->profData();
TransCFG cfg(funcId, profData, tx64->getSrcDB(), tx64->getJmpToTransIDMap());
if (Trace::moduleEnabled(HPHP::Trace::pgo, 5)) {
string dotFileName = folly::to<string>("/tmp/func-cfg-", funcId, ".dot");
cfg.print(dotFileName, funcId, profData, nullptr);
FTRACE(5, "regionizeFunc: initial CFG for func {} saved to file {}\n",
funcId, dotFileName);
}
TransCFG::ArcPtrVec arcs = cfg.arcs();
vector<TransID> nodes = cfg.nodes();
std::sort(nodes.begin(), nodes.end(),
[&](TransID tid1, TransID tid2) -> bool {
if (cfg.weight(tid1) != cfg.weight(tid2)) {
return cfg.weight(tid1) > cfg.weight(tid2);
}
// In case of ties, pick older translations first, in an
// attempt to start loops at their headers.
return tid1 < tid2;
});
TransCFG::ArcPtrSet coveredArcs;
TransIDSet coveredNodes;
TransIDSet heads;
TransIDToRegionMap headToRegion;
RegionToTransIDsMap regionToTransIds;
regions.clear();
for (auto node : nodes) {
if (!setContains(coveredNodes, node) ||
!allArcsCovered(cfg.inArcs(node), coveredArcs)) {
TransID newHead = node;
FTRACE(6, "regionizeFunc: selecting trace to cover node {}\n", newHead);
TransIDSet selectedSet;
TransIDVec selectedVec;
RegionDescPtr region = selectHotTrace(newHead, profData, cfg,
selectedSet, &selectedVec);
profData->setOptimized(profData->transSrcKey(newHead));
assert(selectedVec.size() > 0 && selectedVec[0] == newHead);
regions.push_back(region);
heads.insert(newHead);
markCovered(cfg, selectedVec, heads, coveredNodes, coveredArcs);
regionToTransIds[region] = selectedVec;
headToRegion[newHead] = region;
FTRACE(6, "regionizeFunc: selected trace: {}\n",
folly::join(", ", selectedVec));
}
}
assert(coveredNodes.size() == cfg.nodes().size());
assert(coveredArcs.size() == arcs.size());
sortRegion(regions, func, cfg, profData, headToRegion, regionToTransIds);
if (debug && Trace::moduleEnabled(HPHP::Trace::pgo, 5)) {
FTRACE(5, "\n--------------------------------------------\n"
"regionizeFunc({}): computed regions:\n", funcId);
for (auto region : regions) {
FTRACE(5, "{}\n\n", show(*region));
}
}
}
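One detail of regionizeFunc worth isolating is its node ordering: hotter translations are processed first, and ties are broken toward older (lower) ids in an attempt to start loops at their headers. A tiny standalone demonstration of that comparator, with made-up ids and weights:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  struct Node { int id; long weight; };
  std::vector<Node> nodes = {{3, 50}, {1, 80}, {2, 80}, {4, 10}};
  std::sort(nodes.begin(), nodes.end(), [](const Node& a, const Node& b) {
    if (a.weight != b.weight) return a.weight > b.weight;  // heavier first
    return a.id < b.id;                                    // older id wins ties
  });
  for (const auto& n : nodes) {
    std::printf("id=%d weight=%ld\n", n.id, n.weight);
  }
  // Output order of ids: 1, 2, 3, 4
  return 0;
}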
Example 12: do_analyze_collect
FuncAnalysis do_analyze_collect(const Index& index,
Context const ctx,
CollectedInfo& collect,
ClassAnalysis* clsAnalysis,
const std::vector<Type>* knownArgs) {
assertx(ctx.cls == adjust_closure_context(ctx).cls);
FuncAnalysis ai{ctx};
auto const bump = trace_bump_for(ctx.cls, ctx.func);
Trace::Bump bumper1{Trace::hhbbc, bump};
Trace::Bump bumper2{Trace::hhbbc_cfg, bump};
if (knownArgs) {
FTRACE(2, "{:.^70}\n", "Inline Interp");
}
SCOPE_EXIT {
if (knownArgs) {
FTRACE(2, "{:.^70}\n", "End Inline Interp");
}
};
FTRACE(2, "{:-^70}\n-- {}\n", "Analyze", show(ctx));
/*
* Set of RPO ids that still need to be visited.
*
* Initially, we need each entry block in this list. As we visit
* blocks, we propagate states to their successors and across their
* back edges---when state merges cause a change to the block
* stateIn, we will add it to this queue so it gets visited again.
*/
auto incompleteQ = prepare_incompleteQ(index, ai, clsAnalysis, knownArgs);
/*
* There are potentially infinitely growing types when we're using union_of to
* merge states, so occasionally we need to apply a widening operator.
*
* Currently this is done by having a straightforward heuristic: if you visit
* a block too many times, we'll start doing all the merges with the widening
* operator. We must then continue iterating in case the actual fixed point is
* higher than the result of widening. Likewise if we loop too much because of
* local static types changing, we'll widen those.
*
* Termination is guaranteed because the widening operator has only finite
* chains in the type lattice.
*/
auto totalVisits = std::vector<uint32_t>(ctx.func->blocks.size());
auto totalLoops = uint32_t{0};
// For debugging, count how many times basic blocks get interpreted.
auto interp_counter = uint32_t{0};
// Used to force blocks that depended on the types of local statics
// to be re-analyzed when the local statics change.
std::unordered_map<borrowed_ptr<const php::Block>, std::map<LocalId, Type>>
usedLocalStatics;
/*
* Iterate until a fixed point.
*
* Each time a stateIn for a block changes, we re-insert the block's
* rpo ID in incompleteQ. Since incompleteQ is ordered, we'll
* always visit blocks with earlier RPO ids first, which hopefully
* means fewer iterations.
*/
do {
while (!incompleteQ.empty()) {
auto const blk = ai.rpoBlocks[incompleteQ.pop()];
totalVisits[blk->id]++;
FTRACE(2, "block #{}\nin {}{}", blk->id,
state_string(*ctx.func, ai.bdata[blk->id].stateIn, collect),
property_state_string(collect.props));
++interp_counter;
auto propagate = [&] (BlockId target, const State* st) {
if (!st) {
FTRACE(2, " Force reprocess: {}\n", target);
incompleteQ.push(rpoId(ai, target));
return;
}
auto const needsWiden =
totalVisits[target] >= options.analyzeFuncWideningLimit;
FTRACE(2, " {}-> {}\n", needsWiden ? "widening " : "", target);
FTRACE(4, "target old {}",
state_string(*ctx.func, ai.bdata[target].stateIn, collect));
auto const changed =
needsWiden ? widen_into(ai.bdata[target].stateIn, *st)
: merge_into(ai.bdata[target].stateIn, *st);
if (changed) {
incompleteQ.push(rpoId(ai, target));
}
FTRACE(4, "target new {}",
state_string(*ctx.func, ai.bdata[target].stateIn, collect));
};
//......... some code omitted here .........
Example 13: FTRACE
// -----------------------------------------------------------------------------
// CBTServiceDelayedDestroyer::RunError()
// -----------------------------------------------------------------------------
//
TInt CBTServiceDelayedDestroyer::RunError(TInt aError)
{
FTRACE(FPrint(_L("[BTSU]\t CBTServiceStarter::RunError() aError = %d"), aError) );
(void) aError;
return KErrNone;
}
Example 14: FTRACE
/*
-------------------------------------------------------------------------------
Class: CSimpleTimeout
Method: ~CSimpleTimeout
Description: Destructor.
Cancel request
Parameters: None
Return Values: None
Errors/Exceptions: None
Status: Approved
-------------------------------------------------------------------------------
*/
CSimpleTimeout::~CSimpleTimeout()
{
FTRACE(FPrint(_L("CSimpleTimeout::~CSimpleTimeout")));
Cancel();
iTimer.Close();
}
Example 15: assert
TransCFG::TransCFG(FuncId funcId,
const ProfData* profData,
const SrcDB& srcDB,
const TcaTransIDMap& jmpToTransID) {
assert(profData);
// add nodes
for (auto tid : profData->funcProfTransIDs(funcId)) {
assert(profData->transRegion(tid) != nullptr);
// This will skip DV Funclets if they were already
// retranslated w/ the prologues:
if (!profData->optimized(profData->transSrcKey(tid))) {
int64_t counter = profData->transCounter(tid);
int64_t weight = RuntimeOption::EvalJitPGOThreshold - counter;
addNode(tid, weight);
}
}
// add arcs
for (TransID dstId : nodes()) {
SrcKey dstSK = profData->transSrcKey(dstId);
RegionDesc::BlockPtr dstBlock = profData->transRegion(dstId)->blocks[0];
const SrcRec* dstSR = srcDB.find(dstSK);
FTRACE(5, "TransCFG: adding incoming arcs in dstId = {}\n", dstId);
TransIDSet predIDs = findPredTrans(dstSR, jmpToTransID);
for (auto predId : predIDs) {
if (hasNode(predId)) {
auto predPostConds =
profData->transRegion(predId)->blocks.back()->postConds();
SrcKey predSK = profData->transSrcKey(predId);
if (preCondsAreSatisfied(dstBlock, predPostConds) &&
predSK.resumed() == dstSK.resumed()) {
FTRACE(5, "TransCFG: adding arc {} -> {} ({} -> {})\n",
predId, dstId, showShort(predSK), showShort(dstSK));
addArc(predId, dstId, TransCFG::Arc::kUnknownWeight);
}
}
}
}
// infer arc weights
bool changed;
do {
changed = false;
for (TransID tid : nodes()) {
int64_t nodeWeight = weight(tid);
if (inferredArcWeight(inArcs(tid), nodeWeight)) changed = true;
if (inferredArcWeight(outArcs(tid), nodeWeight)) changed = true;
}
} while (changed);
// guess weight for non-inferred arcs
for (TransID tid : nodes()) {
for (auto arc : outArcs(tid)) {
if (arc->weight() == Arc::kUnknownWeight) {
arc->setGuessed();
int64_t arcWgt = std::min(weight(arc->src()), weight(arc->dst())) / 2;
arc->setWeight(arcWgt);
}
}
}
}
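Example 15 ends with a loop that repeatedly calls inferredArcWeight over each node's incoming and outgoing arcs until nothing more can be deduced. The helper below is a plausible sketch of what such a pass could do, under the assumption that an arc weight can only be inferred when it is the single unknown arc in its group; HHVM's actual implementation may differ in details.

#include <algorithm>
#include <cstdint>
#include <vector>

struct Arc {
  int64_t weight;
  bool known;
};

// If exactly one arc in the group still has an unknown weight, it must account
// for whatever part of the node weight the known arcs do not explain.
bool inferOneArcWeight(std::vector<Arc*>& arcs, int64_t nodeWeight) {
  int64_t knownSum = 0;
  Arc* unknown = nullptr;
  int unknownCount = 0;
  for (Arc* arc : arcs) {
    if (arc->known) {
      knownSum += arc->weight;
    } else {
      unknown = arc;
      ++unknownCount;
    }
  }
  if (unknownCount != 1) return false;
  unknown->weight = std::max<int64_t>(nodeWeight - knownSum, 0);
  unknown->known = true;
  return true;
}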