This article collects typical usage examples of the C++ function mozilla::MakeEnumeratedRange. If you are unsure what mozilla::MakeEnumeratedRange does, how to call it, or simply want to see it used in real code, the hand-picked examples below should help. You can also browse further usage examples from the mozilla namespace in which it is defined.
The following shows 8 code examples of mozilla::MakeEnumeratedRange, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better C++ samples.
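Before the real-world snippets, here is a minimal, self-contained sketch of how MakeEnumeratedRange (declared in "mozilla/EnumeratedRange.h") is typically used. The Color enum and the printf output are made up for illustration, and the sketch assumes a current MFBT header where the underlying integer type is deduced automatically; the Mozilla code below follows the same pattern with real enums such as SymbolicAddress, JumpTarget and Trap, each of which ends in a Limit sentinel.
#include <cstdint>
#include <cstdio>

#include "mozilla/EnumeratedRange.h" // declares mozilla::MakeEnumeratedRange

// Hypothetical enum class used only for this sketch. Like the enums in the
// examples below, it ends with a 'Limit' sentinel marking one-past-the-end.
enum class Color : uint8_t
{
    Red,
    Green,
    Blue,
    Limit
};

int main()
{
    // One-argument form: iterates the half-open range [Color(0), Color::Limit),
    // so the body sees Red, Green and Blue but never the Limit sentinel itself.
    for (auto color : mozilla::MakeEnumeratedRange(Color::Limit))
        printf("color %u\n", unsigned(color));

    // Two-argument form: iterate an explicit half-open sub-range [Green, Limit).
    for (auto color : mozilla::MakeEnumeratedRange(Color::Green, Color::Limit))
        printf("color %u\n", unsigned(color));

    return 0;
}
This is exactly the pattern the examples below rely on: each enum reserves a trailing Limit enumerator so that MakeEnumeratedRange(Enum::Limit) visits every real value exactly once.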
Example 1: PatchedImmPtr
static void
StaticallyLink(CodeSegment& cs, const LinkData& linkData, ExclusiveContext* cx)
{
    for (LinkData::InternalLink link : linkData.internalLinks) {
        uint8_t* patchAt = cs.base() + link.patchAtOffset;
        void* target = cs.base() + link.targetOffset;
        if (link.isRawPointerPatch())
            *(void**)(patchAt) = target;
        else
            Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
    }

    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
        const Uint32Vector& offsets = linkData.symbolicLinks[imm];
        for (size_t i = 0; i < offsets.length(); i++) {
            uint8_t* patchAt = cs.base() + offsets[i];
            void* target = AddressOf(imm, cx);
            Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                               PatchedImmPtr(target),
                                               PatchedImmPtr((void*)-1));
        }
    }

    // These constants are logically part of the code:
    *(double*)(cs.globalData() + NaN64GlobalDataOffset) = GenericNaN();
    *(float*)(cs.globalData() + NaN32GlobalDataOffset) = GenericNaN();
}
Example 2: AddressOf
bool
Module::clone(JSContext* cx, const StaticLinkData& link, Module* out) const
{
    MOZ_ASSERT(dynamicallyLinked_);

    // The out->module_ field was already cloned and initialized when 'out' was
    // constructed. This function should clone the rest.
    MOZ_ASSERT(out->module_);

    out->profilingEnabled_ = profilingEnabled_;

    if (!CloneVector(cx, funcLabels_, &out->funcLabels_))
        return false;

#ifdef DEBUG
    // Put the symbolic links back to -1 so PatchDataWithValueCheck assertions
    // in Module::staticallyLink are valid.
    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
        void* callee = AddressOf(imm, cx);
        const StaticLinkData::OffsetVector& offsets = link.symbolicLinks[imm];
        for (uint32_t offset : offsets) {
            jit::Assembler::PatchDataWithValueCheck(jit::CodeLocationLabel(out->code() + offset),
                                                    jit::PatchedImmPtr((void*)-1),
                                                    jit::PatchedImmPtr(callee));
        }
    }
#endif

    // If the copied machine code has been specialized to the heap, it must be
    // unspecialized in the copy.
    if (usesHeap())
        out->despecializeFromHeap(heap_);

    return true;
}
Example 3:
bool
StaticLinkData::SymbolicLinkArray::clone(JSContext* cx, SymbolicLinkArray* out) const
{
    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
        if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
            return false;
    }
    return true;
}
Example 4: alloc
bool
ModuleGenerator::finishCodegen(StaticLinkData* link)
{
    uint32_t offsetInWhole = masm_.size();

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().
    Vector<Offsets> entries(cx_);
    Vector<ProfilingOffsets> interpExits(cx_);
    Vector<ProfilingOffsets> jitExits(cx_);
    EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
    ProfilingOffsets badIndirectCallExit;
    Offsets interruptExit;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);

        if (!entries.resize(numExports()))
            return false;
        for (uint32_t i = 0; i < numExports(); i++) {
            uint32_t target = exportMap_->exportFuncIndices[i];
            const Sig& sig = module_->exports[i].sig();
            entries[i] = GenerateEntry(masm, target, sig, usesHeap());
        }

        if (!interpExits.resize(numImports()))
            return false;
        if (!jitExits.resize(numImports()))
            return false;
        for (uint32_t i = 0; i < numImports(); i++) {
            interpExits[i] = GenerateInterpExit(masm, module_->imports[i], i);
            jitExits[i] = GenerateJitExit(masm, module_->imports[i], usesHeap());
        }

        for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
            jumpTargets[target] = GenerateJumpTarget(masm, target);

        badIndirectCallExit = GenerateBadIndirectCallExit(masm);
        interruptExit = GenerateInterruptStub(masm);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.
    for (uint32_t i = 0; i < numExports(); i++) {
        entries[i].offsetBy(offsetInWhole);
        module_->exports[i].initStubOffset(entries[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }
    for (uint32_t i = 0; i < numImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initInterpExitOffset(interpExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initJitExitOffset(jitExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }
    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    badIndirectCallExit.offsetBy(offsetInWhole);
    if (!module_->codeRanges.emplaceBack(CodeRange::ErrorExit, badIndirectCallExit))
        return false;

    interruptExit.offsetBy(offsetInWhole);
    if (!module_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in StaticLinkData with the offsets of these stubs.
    link->pod.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
    link->pod.interruptOffset = interruptExit.begin;

    for (uint32_t sigIndex = 0; sigIndex < numSigs_; sigIndex++) {
        const TableModuleGeneratorData& table = shared_->sigToTable[sigIndex];
        if (table.elemFuncIndices.empty())
            continue;

        Uint32Vector elemOffsets;
        if (!elemOffsets.resize(table.elemFuncIndices.length()))
            return false;

        for (size_t i = 0; i < table.elemFuncIndices.length(); i++) {
            uint32_t funcIndex = table.elemFuncIndices[i];
            if (funcIndex == BadIndirectCall)
//......... (the rest of this example is omitted on the source page) .........
Example 5: alreadyThunked
bool
ModuleGenerator::convertOutOfRangeBranchesToThunks()
{
    masm_.haltingAlign(CodeAlignment);

    // Create thunks for callsites that have gone out of range. Use a map to
    // create one thunk for each callee since there is often high reuse.
    OffsetMap alreadyThunked(cx_);
    if (!alreadyThunked.init())
        return false;

    for (; lastPatchedCallsite_ < masm_.callSites().length(); lastPatchedCallsite_++) {
        const CallSiteAndTarget& cs = masm_.callSites()[lastPatchedCallsite_];
        if (!cs.isInternal())
            continue;

        uint32_t callerOffset = cs.returnAddressOffset();
        MOZ_RELEASE_ASSERT(callerOffset < INT32_MAX);

        if (funcIsDefined(cs.targetIndex())) {
            uint32_t calleeOffset = funcEntry(cs.targetIndex());
            MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);

            if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
                masm_.patchCall(callerOffset, calleeOffset);
                continue;
            }
        }

        OffsetMap::AddPtr p = alreadyThunked.lookupForAdd(cs.targetIndex());
        if (!p) {
            Offsets offsets;
            offsets.begin = masm_.currentOffset();
            uint32_t thunkOffset = masm_.thunkWithPatch().offset();
            if (masm_.oom())
                return false;
            offsets.end = masm_.currentOffset();

            if (!module_->codeRanges.emplaceBack(CodeRange::CallThunk, offsets))
                return false;
            if (!module_->callThunks.emplaceBack(thunkOffset, cs.targetIndex()))
                return false;
            if (!alreadyThunked.add(p, cs.targetIndex(), offsets.begin))
                return false;
        }

        masm_.patchCall(callerOffset, p->value());
    }

    // Create thunks for jumps to stubs. Stubs are always generated at the end
    // so unconditionally thunk all existing jump sites.
    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        if (masm_.jumpSites()[target].empty())
            continue;

        for (uint32_t jumpSite : masm_.jumpSites()[target]) {
            RepatchLabel label;
            label.use(jumpSite);
            masm_.bind(&label);
        }

        Offsets offsets;
        offsets.begin = masm_.currentOffset();
        uint32_t thunkOffset = masm_.thunkWithPatch().offset();
        if (masm_.oom())
            return false;
        offsets.end = masm_.currentOffset();

        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, offsets))
            return false;
        if (!jumpThunks_[target].append(thunkOffset))
            return false;
    }

    // Unlike callsites, which need to be persisted in the Module, we can simply
    // flush jump sites after each patching pass.
    masm_.clearJumpSites();

    return true;
}
Example 6: alloc
bool
ModuleGenerator::finishCodegen()
{
    uint32_t offsetInWhole = masm_.size();

    uint32_t numFuncExports = metadata_->funcExports.length();
    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().
    OffsetVector entries;
    ProfilingOffsetVector interpExits;
    ProfilingOffsetVector jitExits;
    EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
    Offsets interruptExit;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);

        if (!entries.resize(numFuncExports))
            return false;
        for (uint32_t i = 0; i < numFuncExports; i++)
            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);

        if (!interpExits.resize(numFuncImports()))
            return false;
        if (!jitExits.resize(numFuncImports()))
            return false;
        for (uint32_t i = 0; i < numFuncImports(); i++) {
            interpExits[i] = GenerateInterpExit(masm, metadata_->funcImports[i], i);
            jitExits[i] = GenerateJitExit(masm, metadata_->funcImports[i]);
        }

        for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
            jumpTargets[target] = GenerateJumpTarget(masm, target);

        interruptExit = GenerateInterruptStub(masm);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.
    for (uint32_t i = 0; i < numFuncExports; i++) {
        entries[i].offsetBy(offsetInWhole);
        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }
    for (uint32_t i = 0; i < numFuncImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }
    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    interruptExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in LinkData with the offsets of these stubs.
    linkData_.interruptOffset = interruptExit.begin;
    linkData_.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
    linkData_.unalignedAccessOffset = jumpTargets[JumpTarget::UnalignedAccess].begin;
    linkData_.badIndirectCallOffset = jumpTargets[JumpTarget::BadIndirectCall].begin;

    // Only call convertOutOfRangeBranchesToThunks after all other codegen that may
    // emit new jumps to JumpTargets has finished.
    if (!convertOutOfRangeBranchesToThunks())
        return false;

    // Now that all thunks have been generated, patch all the thunks.
    for (CallThunk& callThunk : metadata_->callThunks) {
        uint32_t funcIndex = callThunk.u.funcIndex;
        callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
        masm_.patchThunk(callThunk.offset, funcCodeRange(funcIndex).funcNonProfilingEntry());
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        for (uint32_t thunkOffset : jumpThunks_[target])
//......... (the rest of this example is omitted on the source page) .........
Example 7: jcx
bool
Module::staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData)
{
    MOZ_ASSERT(!dynamicallyLinked_);
    MOZ_ASSERT(!staticallyLinked_);
    staticallyLinked_ = true;

    // Push a JitContext for benefit of IsCompilingAsmJS and delay flushing
    // until Module::dynamicallyLink.
    JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
    MOZ_ASSERT(IsCompilingAsmJS());
    AutoFlushICache afc("Module::staticallyLink", /* inhibit = */ true);
    AutoFlushICache::setRange(uintptr_t(code()), codeBytes());

    interrupt_ = code() + linkData.pod.interruptOffset;
    outOfBounds_ = code() + linkData.pod.outOfBoundsOffset;

    for (StaticLinkData::InternalLink link : linkData.internalLinks) {
        uint8_t* patchAt = code() + link.patchAtOffset;
        void* target = code() + link.targetOffset;

        // If the target of an InternalLink is the non-profiling entry of a
        // function, then we assume it is for a call that wants to call the
        // profiling entry when profiling is enabled. Note that the target may
        // be in the middle of a function (e.g., for a switch table) and in
        // these cases we should not modify the target.
        if (profilingEnabled_) {
            if (const CodeRange* cr = lookupCodeRange(target)) {
                if (cr->isFunction() && link.targetOffset == cr->funcNonProfilingEntry())
                    target = code() + cr->funcProfilingEntry();
            }
        }

        if (link.isRawPointerPatch())
            *(void**)(patchAt) = target;
        else
            Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
    }

    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
        const StaticLinkData::OffsetVector& offsets = linkData.symbolicLinks[imm];
        for (size_t i = 0; i < offsets.length(); i++) {
            uint8_t* patchAt = code() + offsets[i];
            void* target = AddressOf(imm, cx);
            Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                               PatchedImmPtr(target),
                                               PatchedImmPtr((void*)-1));
        }
    }

    for (const StaticLinkData::FuncPtrTable& table : linkData.funcPtrTables) {
        auto array = reinterpret_cast<void**>(globalData() + table.globalDataOffset);
        for (size_t i = 0; i < table.elemOffsets.length(); i++) {
            uint8_t* elem = code() + table.elemOffsets[i];
            if (profilingEnabled_)
                elem = code() + lookupCodeRange(elem)->funcProfilingEntry();
            array[i] = elem;
        }
    }

    // CodeRangeVector, CallSiteVector and the code technically have all the
    // necessary info to do all the updates necessary in setProfilingEnabled.
    // However, to simplify the finding of function-pointer table sizes and
    // global-data offsets, save just that information here.
    if (!funcPtrTables_.appendAll(linkData.funcPtrTables)) {
        ReportOutOfMemory(cx);
        return false;
    }

    return true;
}
Example 8: alloc
bool
ModuleGenerator::finishCodegen()
{
    masm_.haltingAlign(CodeAlignment);

    uint32_t offsetInWhole = masm_.size();

    uint32_t numFuncExports = metadata_->funcExports.length();
    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().
    OffsetVector entries;
    ProfilingOffsetVector interpExits;
    ProfilingOffsetVector jitExits;
    TrapExitOffsetArray trapExits;
    Offsets outOfBoundsExit;
    Offsets unalignedAccessExit;
    Offsets interruptExit;
    Offsets throwStub;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
        Label throwLabel;

        if (!entries.resize(numFuncExports))
            return false;
        for (uint32_t i = 0; i < numFuncExports; i++)
            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);

        if (!interpExits.resize(numFuncImports()))
            return false;
        if (!jitExits.resize(numFuncImports()))
            return false;
        for (uint32_t i = 0; i < numFuncImports(); i++) {
            interpExits[i] = GenerateImportInterpExit(masm, metadata_->funcImports[i], i, &throwLabel);
            jitExits[i] = GenerateImportJitExit(masm, metadata_->funcImports[i], &throwLabel);
        }

        for (Trap trap : MakeEnumeratedRange(Trap::Limit))
            trapExits[trap] = GenerateTrapExit(masm, trap, &throwLabel);

        outOfBoundsExit = GenerateOutOfBoundsExit(masm, &throwLabel);
        unalignedAccessExit = GenerateUnalignedExit(masm, &throwLabel);
        interruptExit = GenerateInterruptExit(masm, &throwLabel);
        throwStub = GenerateThrowStub(masm, &throwLabel);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.
    for (uint32_t i = 0; i < numFuncExports; i++) {
        entries[i].offsetBy(offsetInWhole);
        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }
    for (uint32_t i = 0; i < numFuncImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }
    for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
        trapExits[trap].offsetBy(offsetInWhole);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::TrapExit, trapExits[trap]))
            return false;
    }

    outOfBoundsExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, outOfBoundsExit))
        return false;

    unalignedAccessExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, unalignedAccessExit))
        return false;

    interruptExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    throwStub.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, throwStub))
        return false;

    // Fill in LinkData with the offsets of these stubs.
    linkData_.outOfBoundsOffset = outOfBoundsExit.begin;
//......... (the rest of this example is omitted on the source page) .........