This page collects typical usage examples of the C++ Offsets class. If you are wondering what the Offsets class is for, how to use it, or what real code that uses it looks like, the curated class examples below may help.
The following shows 15 code examples of the Offsets class, sorted by popularity by default.
Example 1: getNonZeroOffsets
Offsets DenseVectorN::getNonZeroOffsets() const
{
    Offsets offsetList;
    for (int i = 0, sz = x.size(); i < sz; ++i)
    {
        if (x[i] != 0.0)
        {
            offsetList.push_back(Offset(i, x[i]));
        }
    }
    return offsetList;
}
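A minimal usage sketch for this example (not taken from the original project; it assumes Offsets is a vector-like container and that Offset exposes the index and value passed to its constructor above through hypothetical index()/value() accessors):

// Hypothetical caller: obtain the sparse view of a dense vector.
// DenseVectorN's constructor and Offset's accessors are assumptions here.
DenseVectorN v(std::vector<double>{0.0, 1.5, 0.0, -2.0});
Offsets nonZero = v.getNonZeroOffsets();
for (const Offset & o : nonZero)
    std::cout << o.index() << " -> " << o.value() << '\n';   // e.g. 1 -> 1.5, then 3 -> -2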
Example 2: size
template <typename T>
ColumnPtr ColumnArray::replicateNumber(const Offsets & replicate_offsets) const
{
    size_t col_size = size();
    if (col_size != replicate_offsets.size())
        throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

    MutableColumnPtr res = cloneEmpty();

    if (0 == col_size)
        return res;

    ColumnArray & res_ = typeid_cast<ColumnArray &>(*res);

    const typename ColumnVector<T>::Container & src_data = typeid_cast<const ColumnVector<T> &>(*data).getData();
    const Offsets & src_offsets = getOffsets();

    typename ColumnVector<T>::Container & res_data = typeid_cast<ColumnVector<T> &>(res_.getData()).getData();
    Offsets & res_offsets = res_.getOffsets();

    res_data.reserve(data->size() / col_size * replicate_offsets.back());
    res_offsets.reserve(replicate_offsets.back());

    Offset prev_replicate_offset = 0;
    Offset prev_data_offset = 0;
    Offset current_new_offset = 0;

    for (size_t i = 0; i < col_size; ++i)
    {
        size_t size_to_replicate = replicate_offsets[i] - prev_replicate_offset;
        size_t value_size = src_offsets[i] - prev_data_offset;

        for (size_t j = 0; j < size_to_replicate; ++j)
        {
            current_new_offset += value_size;
            res_offsets.push_back(current_new_offset);

            if (value_size)
            {
                res_data.resize(res_data.size() + value_size);
                memcpy(&res_data[res_data.size() - value_size], &src_data[prev_data_offset], value_size * sizeof(T));
            }
        }

        prev_replicate_offset = replicate_offsets[i];
        prev_data_offset = src_offsets[i];
    }

    return res;
}
Example 3: FindOffsets
// returns rebased addresses
Offsets HexSearcher::FindOffsets(std::vector<unsigned char> const& pattern, size_t limit)
{
    Offsets offsets;
    ADDRESS begin = GetModuleBegin();
    ADDRESS end = GetModuleEnd();

    // loop through every hex value in the binary
    for (ADDRESS i = begin; (i + pattern.size()) < end; ++i)
    {
        if (limit && offsets.size() >= limit)
            break;

        size_t matches = 0;
        for (size_t j = 0; j < pattern.size(); j++)
        {
            // 0x00, any val
            if (pattern[j] == 0)
            {
                matches++;
                continue;
            }

            //ADDRESS static_address = STATIC_REBASE(i + j);

            // pattern doesn't match, retry @ next hex val
            unsigned char ch = *(unsigned char*)(i + j);
            if (ch != pattern[j])
                break;

            matches++;
        }

        if (matches == pattern.size())
        {
            offsets.insert(i);
            i += matches;
        }
    }

    return offsets;
}
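A sketch of how such a signature scan might be called (only FindOffsets itself comes from the example; the construction of HexSearcher and the concrete byte pattern are assumptions). Note that 0x00 bytes in the pattern act as wildcards, so positions that vary between builds are usually written as 0x00:

// Hypothetical usage: find up to 4 occurrences of a byte pattern in the module.
std::vector<unsigned char> pattern = { 0x8B, 0x45, 0x00, 0x89, 0x00, 0x00, 0xC3 };
HexSearcher searcher;                          // construction details assumed
Offsets hits = searcher.FindOffsets(pattern, /*limit=*/4);
for (ADDRESS addr : hits)
    std::cout << std::hex << addr << '\n';     // address of each match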
Example 4: replicate
ColumnPtr ColumnFixedString::replicate(const Offsets & offsets) const
{
    size_t col_size = size();
    if (col_size != offsets.size())
        throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

    auto res = ColumnFixedString::create(n);

    if (0 == col_size)
        return std::move(res);

    Chars_t & res_chars = res->chars;
    res_chars.resize(n * offsets.back());

    Offset curr_offset = 0;
    for (size_t i = 0; i < col_size; ++i)
        for (size_t next_offset = offsets[i]; curr_offset < next_offset; ++curr_offset)
            memcpySmallAllowReadWriteOverflow15(&res->chars[curr_offset * n], &chars[i * n], n);

    return std::move(res);
}
Example 5: getOffsets
MatrixOpDataRcPtr MatrixOpData::inverse() const
{
    // Get the inverse matrix.
    MatrixArrayPtr invMatrixArray = m_array.inverse();
    // MatrixArray::inverse() will throw for singular matrices.

    // Calculate the inverse offset.
    const Offsets & offsets = getOffsets();
    Offsets invOffsets;
    if (offsets.isNotNull())
    {
        invMatrixArray->inner(offsets, invOffsets);
        invOffsets.scale(-1);
    }

    MatrixOpDataRcPtr invOp = std::make_shared<MatrixOpData>(getOutputBitDepth(), getInputBitDepth());
    invOp->setRGBA(&(invMatrixArray->getValues()[0]));
    invOp->setOffsets(invOffsets);

    // No need to call validate(); the invOp will have proper dimension,
    // bit-depths, matrix and offsets values.
    return invOp;
}
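The offset handling follows from the algebra of an affine transform: if the forward op computes y = M*x + o, its inverse is x = M⁻¹*y - M⁻¹*o, so the inverse offset is M⁻¹ applied to the original offsets (the inner() call) and then negated (the scale(-1) call). A scalar sketch of that identity, independent of the classes above:

#include <cassert>

int main()
{
    // 1-D stand-in for the matrix case: forward map y = m*x + o.
    double m = 2.0, o = 3.0, x = 5.0;
    double y = m * x + o;

    // Build the inverse the same way as inverse() above:
    // inverse matrix, then invOffset = -(m_inv * o).
    double m_inv = 1.0 / m;
    double inv_offset = -(m_inv * o);

    assert(m_inv * y + inv_offset == x);   // applying the inverse recovers x
    return 0;
}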
Example 6: replicate
ColumnPtr ColumnArray::replicate(const Offsets & replicate_offsets) const
{
    if (replicate_offsets.empty())
        return cloneEmpty();

    if (typeid_cast<const ColumnUInt8 *>(data.get()))    return replicateNumber<UInt8>(replicate_offsets);
    if (typeid_cast<const ColumnUInt16 *>(data.get()))   return replicateNumber<UInt16>(replicate_offsets);
    if (typeid_cast<const ColumnUInt32 *>(data.get()))   return replicateNumber<UInt32>(replicate_offsets);
    if (typeid_cast<const ColumnUInt64 *>(data.get()))   return replicateNumber<UInt64>(replicate_offsets);
    if (typeid_cast<const ColumnInt8 *>(data.get()))     return replicateNumber<Int8>(replicate_offsets);
    if (typeid_cast<const ColumnInt16 *>(data.get()))    return replicateNumber<Int16>(replicate_offsets);
    if (typeid_cast<const ColumnInt32 *>(data.get()))    return replicateNumber<Int32>(replicate_offsets);
    if (typeid_cast<const ColumnInt64 *>(data.get()))    return replicateNumber<Int64>(replicate_offsets);
    if (typeid_cast<const ColumnFloat32 *>(data.get()))  return replicateNumber<Float32>(replicate_offsets);
    if (typeid_cast<const ColumnFloat64 *>(data.get()))  return replicateNumber<Float64>(replicate_offsets);
    if (typeid_cast<const ColumnString *>(data.get()))   return replicateString(replicate_offsets);
    if (typeid_cast<const ColumnConst *>(data.get()))    return replicateConst(replicate_offsets);
    if (typeid_cast<const ColumnNullable *>(data.get())) return replicateNullable(replicate_offsets);
    if (typeid_cast<const ColumnTuple *>(data.get()))    return replicateTuple(replicate_offsets);
    return replicateGeneric(replicate_offsets);
}
Example 7: replicateGeneric
ColumnPtr ColumnArray::replicateGeneric(const Offsets & replicate_offsets) const
{
    size_t col_size = size();
    if (col_size != replicate_offsets.size())
        throw Exception("Size of offsets doesn't match size of column.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);

    MutableColumnPtr res = cloneEmpty();
    ColumnArray & res_concrete = static_cast<ColumnArray &>(*res);

    if (0 == col_size)
        return res;

    IColumn::Offset prev_offset = 0;
    for (size_t i = 0; i < col_size; ++i)
    {
        size_t size_to_replicate = replicate_offsets[i] - prev_offset;
        prev_offset = replicate_offsets[i];

        for (size_t j = 0; j < size_to_replicate; ++j)
            res_concrete.insertFrom(*this, i);
    }

    return res;
}
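In all of the replicate variants shown above, replicate_offsets is cumulative: element i is repeated replicate_offsets[i] - replicate_offsets[i - 1] times (with an implicit 0 before the first element). A self-contained illustration of that convention on a plain std::vector (not part of the column classes above):

#include <cstddef>
#include <iostream>
#include <vector>

// Repeat src[i] (offsets[i] - offsets[i-1]) times, mirroring the cumulative
// offsets convention used by the replicate*() methods above.
std::vector<int> replicate(const std::vector<int> & src, const std::vector<size_t> & offsets)
{
    std::vector<int> result;
    size_t prev = 0;
    for (size_t i = 0; i < src.size(); ++i)
    {
        for (size_t j = prev; j < offsets[i]; ++j)
            result.push_back(src[i]);
        prev = offsets[i];
    }
    return result;
}

int main()
{
    // {10, 20, 30} with cumulative offsets {1, 1, 4}:
    // 10 once, 20 zero times, 30 three times -> prints 10 30 30 30
    for (int v : replicate({10, 20, 30}, {1, 1, 4}))
        std::cout << v << ' ';
    std::cout << '\n';
}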
Example 8: QPixmap
void Layer::drawPixmap()
{
    if ((boundingRect().width() == 0) || (boundingRect().height() == 0))
        return;

    // TODO Forward
    if (m_currentPixmap)
        delete m_currentPixmap;

    m_currentPixmap = new QPixmap(boundingRect().width() * m_areaToDraw, boundingRect().height());

    QPainter p(m_currentPixmap);
    int xPoint = 0;

    for (int i = 0; i < m_offsets[m_columnOffset].size(); i++) {
        Offsets offset = m_offsets[m_columnOffset].at(i);

        if (((m_type == Quasi::MirroredType) && (i != 0)
             && (offset.point() - m_latestPoint < 0))
            || m_shouldMirror) {
            m_drawingMirrored = !m_drawingMirrored;
            m_shouldMirror = false;
        }

        QPixmap pix = generatePartialPixmap(offset.point(), offset.size());
        p.drawPixmap(xPoint, 0, pix);

        xPoint += pix.width();
        m_latestPoint = offset.point();

        if ((m_type == Quasi::MirroredType)
            && (i == m_offsets[m_columnOffset].size() - 1)
            && (offset.size() < m_numColumns))
            m_shouldMirror = true;
    }

    if (m_direction == Quasi::ForwardDirection)
        m_columnOffset = (m_columnOffset - 1 < 0) ? m_offsets.size() - 1 : m_columnOffset - 1;
    else
        m_columnOffset = (m_columnOffset + 1) % m_offsets.size();

    p.end();
}
Example 9: entries
bool
ModuleGenerator::finishCodegen(StaticLinkData* link)
{
    uint32_t offsetInWhole = masm_.size();

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().
    Vector<Offsets> entries(cx_);
    Vector<ProfilingOffsets> interpExits(cx_);
    Vector<ProfilingOffsets> jitExits(cx_);
    EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
    ProfilingOffsets badIndirectCallExit;
    Offsets interruptExit;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);

        if (!entries.resize(numExports()))
            return false;
        for (uint32_t i = 0; i < numExports(); i++) {
            uint32_t target = exportMap_->exportFuncIndices[i];
            const Sig& sig = module_->exports[i].sig();
            entries[i] = GenerateEntry(masm, target, sig, usesHeap());
        }

        if (!interpExits.resize(numImports()))
            return false;
        if (!jitExits.resize(numImports()))
            return false;
        for (uint32_t i = 0; i < numImports(); i++) {
            interpExits[i] = GenerateInterpExit(masm, module_->imports[i], i);
            jitExits[i] = GenerateJitExit(masm, module_->imports[i], usesHeap());
        }

        for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
            jumpTargets[target] = GenerateJumpTarget(masm, target);

        badIndirectCallExit = GenerateBadIndirectCallExit(masm);
        interruptExit = GenerateInterruptStub(masm);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.
    for (uint32_t i = 0; i < numExports(); i++) {
        entries[i].offsetBy(offsetInWhole);
        module_->exports[i].initStubOffset(entries[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }
    for (uint32_t i = 0; i < numImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initInterpExitOffset(interpExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initJitExitOffset(jitExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }
    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    badIndirectCallExit.offsetBy(offsetInWhole);
    if (!module_->codeRanges.emplaceBack(CodeRange::ErrorExit, badIndirectCallExit))
        return false;

    interruptExit.offsetBy(offsetInWhole);
    if (!module_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in StaticLinkData with the offsets of these stubs.
    link->pod.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
    link->pod.interruptOffset = interruptExit.begin;

    for (uint32_t sigIndex = 0; sigIndex < numSigs_; sigIndex++) {
        const TableModuleGeneratorData& table = shared_->sigToTable[sigIndex];
        if (table.elemFuncIndices.empty())
            continue;

        Uint32Vector elemOffsets;
        if (!elemOffsets.resize(table.elemFuncIndices.length()))
            return false;

        for (size_t i = 0; i < table.elemFuncIndices.length(); i++) {
            uint32_t funcIndex = table.elemFuncIndices[i];
            if (funcIndex == BadIndirectCall)
//......... remaining code omitted .........
Example 10: MOZ_ASSERT
bool
ModuleGenerator::finishCodegen()
{
    uint32_t offsetInWhole = masm_.size();

    uint32_t numFuncExports = metadata_->funcExports.length();
    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().
    OffsetVector entries;
    ProfilingOffsetVector interpExits;
    ProfilingOffsetVector jitExits;
    EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
    Offsets interruptExit;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);

        if (!entries.resize(numFuncExports))
            return false;
        for (uint32_t i = 0; i < numFuncExports; i++)
            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);

        if (!interpExits.resize(numFuncImports()))
            return false;
        if (!jitExits.resize(numFuncImports()))
            return false;
        for (uint32_t i = 0; i < numFuncImports(); i++) {
            interpExits[i] = GenerateInterpExit(masm, metadata_->funcImports[i], i);
            jitExits[i] = GenerateJitExit(masm, metadata_->funcImports[i]);
        }

        for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
            jumpTargets[target] = GenerateJumpTarget(masm, target);

        interruptExit = GenerateInterruptStub(masm);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.
    for (uint32_t i = 0; i < numFuncExports; i++) {
        entries[i].offsetBy(offsetInWhole);
        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }
    for (uint32_t i = 0; i < numFuncImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }
    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    interruptExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in LinkData with the offsets of these stubs.
    linkData_.interruptOffset = interruptExit.begin;
    linkData_.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
    linkData_.unalignedAccessOffset = jumpTargets[JumpTarget::UnalignedAccess].begin;
    linkData_.badIndirectCallOffset = jumpTargets[JumpTarget::BadIndirectCall].begin;

    // Only call convertOutOfRangeBranchesToThunks after all other codegen that may
    // emit new jumps to JumpTargets has finished.
    if (!convertOutOfRangeBranchesToThunks())
        return false;

    // Now that all thunks have been generated, patch all the thunks.
    for (CallThunk& callThunk : metadata_->callThunks) {
        uint32_t funcIndex = callThunk.u.funcIndex;
        callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
        masm_.patchThunk(callThunk.offset, funcCodeRange(funcIndex).funcNonProfilingEntry());
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        for (uint32_t thunkOffset : jumpThunks_[target])
//......... remaining code omitted .........
Example 11: MOZ_ASSERT
bool
ModuleGenerator::finishCodegen()
{
    masm_.haltingAlign(CodeAlignment);

    uint32_t offsetInWhole = masm_.size();

    uint32_t numFuncExports = metadata_->funcExports.length();
    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().
    OffsetVector entries;
    ProfilingOffsetVector interpExits;
    ProfilingOffsetVector jitExits;
    TrapExitOffsetArray trapExits;
    Offsets outOfBoundsExit;
    Offsets unalignedAccessExit;
    Offsets interruptExit;
    Offsets throwStub;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
        Label throwLabel;

        if (!entries.resize(numFuncExports))
            return false;
        for (uint32_t i = 0; i < numFuncExports; i++)
            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);

        if (!interpExits.resize(numFuncImports()))
            return false;
        if (!jitExits.resize(numFuncImports()))
            return false;
        for (uint32_t i = 0; i < numFuncImports(); i++) {
            interpExits[i] = GenerateImportInterpExit(masm, metadata_->funcImports[i], i, &throwLabel);
            jitExits[i] = GenerateImportJitExit(masm, metadata_->funcImports[i], &throwLabel);
        }

        for (Trap trap : MakeEnumeratedRange(Trap::Limit))
            trapExits[trap] = GenerateTrapExit(masm, trap, &throwLabel);

        outOfBoundsExit = GenerateOutOfBoundsExit(masm, &throwLabel);
        unalignedAccessExit = GenerateUnalignedExit(masm, &throwLabel);
        interruptExit = GenerateInterruptExit(masm, &throwLabel);
        throwStub = GenerateThrowStub(masm, &throwLabel);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.
    for (uint32_t i = 0; i < numFuncExports; i++) {
        entries[i].offsetBy(offsetInWhole);
        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }
    for (uint32_t i = 0; i < numFuncImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }
    for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
        trapExits[trap].offsetBy(offsetInWhole);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::TrapExit, trapExits[trap]))
            return false;
    }

    outOfBoundsExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, outOfBoundsExit))
        return false;

    unalignedAccessExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, unalignedAccessExit))
        return false;

    interruptExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    throwStub.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, throwStub))
        return false;

    // Fill in LinkData with the offsets of these stubs.
    linkData_.outOfBoundsOffset = outOfBoundsExit.begin;
//......... remaining code omitted .........
Example 12: if
int
varcmp_offsets(const void *i1, const void *i2)
{
    if (offsets.off(*((Ident *) i1)) < offsets.off(*((Ident *) i2)))
        return -1;
    else if (offsets.off(*((Ident *) i1)) > offsets.off(*((Ident *) i2)))
        return 1;
    else
        return 0;
}
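varcmp_offsets has the standard qsort comparator shape. A minimal sketch of calling it (the global offsets table and the Ident type come from the surrounding project, so this setup is an assumption):

// Hypothetical usage: order identifiers by their assigned offsets.
// Assumes Ident is an integral id type and the global `offsets` table
// has already been filled in by the surrounding code.
Ident vars[] = { 3, 1, 2 };
qsort(vars, sizeof(vars) / sizeof(vars[0]), sizeof(Ident), varcmp_offsets);
// vars is now sorted so that offsets.off(vars[0]) is the smallest.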
Example 13: invariant
void
st_dfa_replace_indices(DFA *a, IdentList *newvars, IdentList *oldvars,
                       bool offnew, bool offold)
{
    if (newvars && oldvars && newvars != oldvars) {
        invariant(newvars->size() == oldvars->size());

        int *indexmap = new int[offsets.maxOffset()];
        IdentList::iterator i, j;
        bool dif = false;

        for (i = newvars->begin(), j = oldvars->begin();
             j != oldvars->end(); i++, j++) {
            int theold = offold ? offsets.off(*j) : *j;
            int thenew = offnew ? offsets.off(*i) : *i;
            indexmap[theold] = thenew;
            if (theold != thenew)
                dif = true;
        }

        if (dif) {
            Timer temp;
            if (options.time) {
                timer_replace_indices.start();
                if (options.statistics)
                    temp.start();
            }
            if (options.statistics)
                cout << "Replacing indices\n";

            dfaReplaceIndices(a, indexmap);
            num_replaces++;

            if (options.time) {
                timer_replace_indices.stop();
                if (options.statistics) {
                    temp.stop();
                    cout << " Time: ";
                    temp.print();
                }
            }
        }

        delete[] indexmap;

        if (options.intermediate)
            dfaPrintVerbose(a);
    }
    /*#warning update_largest(a);*/
}
Example 14: invariant
void
st_gta_replace_indices(GTA *g, IdentList *newvars, IdentList *oldvars,
                       bool offnew, bool offold)
{
    if (newvars && oldvars && newvars != oldvars) {
        invariant(newvars->size() == oldvars->size());

        unsigned *indexmap = new unsigned[offsets.maxOffset()];
        IdentList::iterator i, j;
        bool dif = false;

        for (i = newvars->begin(), j = oldvars->begin();
             j != oldvars->end(); i++, j++) {
            int theold = offold ? offsets.off(*j) : *j;
            int thenew = offnew ? offsets.off(*i) : *i;
            indexmap[theold] = thenew;
            if (theold != thenew)
                dif = true;
        }

        if (dif) {
            Timer temp;
            if (options.time) {
                timer_replace_indices.start();
                if (options.statistics)
                    temp.start();
            }
            if (options.statistics)
                cout << "Replacing indices\n";

            gtaReplaceIndices(g, indexmap);
            num_replaces++;

            if (options.time) {
                timer_replace_indices.stop();
                if (options.statistics) {
                    temp.stop();
                    cout << " Time: ";
                    temp.print();
                }
            }
        }

        delete[] indexmap;
    }
    update_largest(g);
}
Example 15: qsort
void
Signature::make(IdentList &idents)
{
    size = idents.size();
    int *tab1 = new int[size];
    int *tab2 = new int[size];
    IdentList::iterator i;
    unsigned int x, y, s;

    for (i = idents.begin(), x = 0;
         i != idents.end(); i++, x++)
        tab1[x] = tab2[x] = offsets.off(*i);

    qsort((int *) tab2, size, sizeof(int), sortcmp);

    sign = new int[size];
    hashvalue = 0;

    for (x = 0; x < size; x++) {
        for (y = 0, s = 0; tab2[y] != tab1[x]; y++)
            if (y < size && tab2[y] != tab2[y+1])
                s++;
        sign[x] = s;
        hashvalue = hashvalue * x + sign[x];
    }

    delete[] tab1;
    delete[] tab2;
}
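The inner loop assigns each offset its rank among the distinct sorted offsets, so two identifier lists whose offsets have the same relative order produce the same sign array. A self-contained restatement of that computation with standard algorithms (illustrative only; it uses plain ints instead of the project's IdentList and global offsets table, and it omits the hash value):

#include <algorithm>
#include <cstdio>
#include <vector>

// Map each offset to its 0-based rank among the distinct sorted offsets.
std::vector<int> makeSignature(const std::vector<int> & offs)
{
    std::vector<int> sorted(offs);
    std::sort(sorted.begin(), sorted.end());
    sorted.erase(std::unique(sorted.begin(), sorted.end()), sorted.end());

    std::vector<int> sign;
    for (int v : offs)
        sign.push_back(int(std::lower_bound(sorted.begin(), sorted.end(), v) - sorted.begin()));
    return sign;
}

int main()
{
    // Offsets {7, 3, 7, 5} -> distinct sorted {3, 5, 7} -> signature {2, 0, 2, 1}.
    for (int r : makeSignature({7, 3, 7, 5}))
        std::printf("%d ", r);
    std::printf("\n");
}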