本文整理汇总了Java中org.apache.lucene.util.RamUsageEstimator.shallowSizeOf方法的典型用法代码示例。如果您正苦于以下问题:Java RamUsageEstimator.shallowSizeOf方法的具体用法?Java RamUsageEstimator.shallowSizeOf怎么用?Java RamUsageEstimator.shallowSizeOf使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.util.RamUsageEstimator
的用法示例。
在下文中一共展示了RamUsageEstimator.shallowSizeOf方法的6个代码示例，这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞，您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: ramBytesUsed
import org.apache.lucene.util.RamUsageEstimator; //导入方法依赖的package包/类
/**
 * Estimates the heap bytes held by the given cached-arc array: the shallow
 * size of the array itself plus, per non-null arc, a fixed shallow arc cost
 * and the deep size of any non-sentinel outputs attached to it.
 */
private long ramBytesUsed(Arc<T>[] arcs) {
  if (arcs == null) {
    return 0;
  }
  long total = RamUsageEstimator.shallowSizeOf(arcs);
  for (Arc<T> cached : arcs) {
    if (cached == null) {
      continue; // empty slot: only the array cell, already counted above
    }
    total += ARC_SHALLOW_RAM_BYTES_USED;
    // The shared no-output sentinel must not be charged to this arc.
    if (cached.output != null && cached.output != outputs.getNoOutput()) {
      total += outputs.ramBytesUsed(cached.output);
    }
    if (cached.nextFinalOutput != null && cached.nextFinalOutput != outputs.getNoOutput()) {
      total += outputs.ramBytesUsed(cached.nextFinalOutput);
    }
  }
  return total;
}
示例2: Builder
import org.apache.lucene.util.RamUsageEstimator; //导入方法依赖的package包/类
/**
 * Creates a builder using pages of {@code pageSize} longs.
 * {@code checkBlockSize} validates the size (rejecting values outside
 * [MIN_PAGE_SIZE, MAX_PAGE_SIZE]) and returns the corresponding shift;
 * the mask computation below assumes pageSize is a power of two.
 */
Builder(int pageSize, float acceptableOverheadRatio) {
  this.pageShift = checkBlockSize(pageSize, MIN_PAGE_SIZE, MAX_PAGE_SIZE);
  this.pageMask = pageSize - 1; // valid mask because pageSize is a power of two
  this.acceptableOverheadRatio = acceptableOverheadRatio;
  this.values = new PackedInts.Reader[INITIAL_PAGE_COUNT];
  this.pending = new long[pageSize];
  this.size = 0;
  this.valuesOff = 0;
  this.pendingOff = 0;
  // Base object footprint plus the pending buffer (deep) and the page
  // reference array (shallow — the pages themselves are counted as they fill).
  this.ramBytesUsed = baseRamBytesUsed() + RamUsageEstimator.sizeOf(pending) + RamUsageEstimator.shallowSizeOf(values);
}
示例3: Lucene40DocValuesReader
import org.apache.lucene.util.RamUsageEstimator; //导入方法依赖的package包/类
/**
 * Opens a legacy Lucene 4.0 doc-values reader over the given compound file.
 * The compound directory is opened immediately; ramBytesUsed starts at the
 * shallow size of this reader instance and grows as fields are loaded.
 */
Lucene40DocValuesReader(SegmentReadState state, String filename, String legacyKey) throws IOException {
  this.state = state;
  this.legacyKey = legacyKey;
  this.dir = new CompoundFileDirectory(state.directory, filename, state.context, false);
  // FIX: shallowSizeOf(getClass()) measured the java.lang.Class object itself,
  // not this reader. shallowSizeOfInstance(Class) reports the shallow size of
  // an INSTANCE of the class, which is what the accounting intends (and what
  // upstream Lucene uses here).
  ramBytesUsed = new AtomicLong(RamUsageEstimator.shallowSizeOfInstance(getClass()));
}
示例4: TermInfosReaderIndex
import org.apache.lucene.util.RamUsageEstimator; //导入方法依赖的package包/类
/**
* Loads the segment information at segment load time.
*
* @param indexEnum
* the term enum.
* @param indexDivisor
* the index divisor.
* @param tiiFileLength
* the size of the tii file, used to approximate the size of the
* buffer.
* @param totalIndexInterval
* the total index interval.
*/
// Builds a compact in-memory index over the .tii term dictionary: every
// indexDivisor-th term is serialized into a PagedBytes blob, and a packed-ints
// structure maps each index slot to its byte offset within that blob.
TermInfosReaderIndex(SegmentTermEnum indexEnum, int indexDivisor, long tiiFileLength, int totalIndexInterval) throws IOException {
this.totalIndexInterval = totalIndexInterval;
// Number of retained entries: ceiling of indexEnum.size / indexDivisor.
indexSize = 1 + ((int) indexEnum.size - 1) / indexDivisor;
skipInterval = indexEnum.skipInterval;
// this is only an initial size, it will be GCed once the build is complete
long initialSize = (long) (tiiFileLength * 1.5) / indexDivisor;
PagedBytes dataPagedBytes = new PagedBytes(estimatePageBits(initialSize));
PagedBytesDataOutput dataOutput = dataPagedBytes.getDataOutput();
// Bits needed to address any offset in the blob; +1 compensates for log rounding down.
final int bitEstimate = 1+MathUtil.log(tiiFileLength, 2);
GrowableWriter indexToTerms = new GrowableWriter(bitEstimate, indexSize, PackedInts.DEFAULT);
String currentField = null;
List<String> fieldStrs = new ArrayList<>();
int fieldCounter = -1;
for (int i = 0; indexEnum.next(); i++) {
Term term = indexEnum.term();
// Field change detection assumes terms arrive grouped by field; each new
// field gets an ordinal so entries store a small vint instead of the name.
if (currentField == null || !currentField.equals(term.field())) {
currentField = term.field();
fieldStrs.add(currentField);
fieldCounter++;
}
TermInfo termInfo = indexEnum.termInfo();
// Record this entry's start offset, then serialize the entry itself.
indexToTerms.set(i, dataOutput.getPosition());
dataOutput.writeVInt(fieldCounter);
dataOutput.writeString(term.text());
dataOutput.writeVInt(termInfo.docFreq);
// skipOffset is written only when docFreq >= skipInterval — the read path
// presumably applies the same condition; confirm against the lookup code.
if (termInfo.docFreq >= skipInterval) {
dataOutput.writeVInt(termInfo.skipOffset);
}
dataOutput.writeVLong(termInfo.freqPointer);
dataOutput.writeVLong(termInfo.proxPointer);
dataOutput.writeVLong(indexEnum.indexPointer);
// Skip the next indexDivisor-1 terms: only every indexDivisor-th is indexed.
for (int j = 1; j < indexDivisor; j++) {
if (!indexEnum.next()) {
break;
}
}
}
// Materialize the field names once as Term objects for later comparisons.
fields = new Term[fieldStrs.size()];
for (int i = 0; i < fields.length; i++) {
fields[i] = new Term(fieldStrs.get(i));
}
// Freeze the blob (immutable, read-optimized) before taking its data input.
dataPagedBytes.freeze(true);
dataInput = dataPagedBytes.getDataInput();
indexToDataOffset = indexToTerms.getMutable();
// RAM accounting covers only the retained structures; the builder objects
// above (dataOutput, indexToTerms) become garbage after this point.
long ramBytesUsed = RamUsageEstimator.shallowSizeOf(fields);
ramBytesUsed += RamUsageEstimator.shallowSizeOf(dataInput);
ramBytesUsed += fields.length * RamUsageEstimator.shallowSizeOfInstance(Term.class);
ramBytesUsed += dataPagedBytes.ramBytesUsed();
ramBytesUsed += indexToDataOffset.ramBytesUsed();
this.ramBytesUsed = ramBytesUsed;
}
示例5: ramBytesUsed
import org.apache.lucene.util.RamUsageEstimator; //导入方法依赖的package包/类
/**
 * Returns the memory footprint of this object: the fixed base cost, the
 * deep size of both primitive arrays, and the shallow size of the decoder
 * and encoder reference arrays (only the array shells are counted, not the
 * objects they point to).
 */
@Override
public long ramBytesUsed() {
  long bytes = BASE_RAM_BYTES_USED;
  bytes += RamUsageEstimator.sizeOf(encodedSizes);
  bytes += RamUsageEstimator.sizeOf(iterations);
  bytes += RamUsageEstimator.shallowSizeOf(decoders);
  bytes += RamUsageEstimator.shallowSizeOf(encoders);
  return bytes;
}
示例6: grow
import org.apache.lucene.util.RamUsageEstimator; //导入方法依赖的package包/类
/**
 * Resizes the page reference array to {@code newBlockCount} entries and
 * adjusts the RAM accounting by the delta between the new and old shallow
 * array sizes.
 */
void grow(int newBlockCount) {
  final long oldShallowSize = RamUsageEstimator.shallowSizeOf(values);
  values = Arrays.copyOf(values, newBlockCount);
  ramBytesUsed += RamUsageEstimator.shallowSizeOf(values) - oldShallowSize;
}