This article collects typical usage examples of the Java class org.apache.lucene.util.Accountable. If you are unsure what the Accountable class does or how to use it, the curated code examples below may help.
The Accountable class belongs to the org.apache.lucene.util package. Fifteen code examples are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
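Before diving into the examples, here is a minimal sketch of what implementing the Accountable contract looks like. The class below is hypothetical (it is not taken from Lucene or Elasticsearch): an implementation reports its own heap footprint through ramBytesUsed() and can expose a breakdown of nested resources through getChildResources().

import java.util.Collection;
import java.util.Collections;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;

// Hypothetical class, for illustration only.
class CountedBuffer implements Accountable {
    private final byte[] data;

    CountedBuffer(int size) {
        this.data = new byte[size];
    }

    @Override
    public long ramBytesUsed() {
        // Rough estimate: the backing array plus a small object overhead.
        return data.length + 32;
    }

    @Override
    public Collection<Accountable> getChildResources() {
        // Report the array as a named child so tools can show a per-resource breakdown.
        return Collections.singletonList(
                Accountables.namedAccountable("data", data.length));
    }
}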
Example 1: empty

import org.apache.lucene.util.Accountable; // import the required package/class

public static AtomicGeoPointFieldData empty(final int maxDoc) {
    return new AbstractAtomicGeoPointFieldData() {

        @Override
        public long ramBytesUsed() {
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() {
        }

        @Override
        public MultiGeoPointValues getGeoPointValues() {
            return FieldData.emptyMultiGeoPoints(maxDoc);
        }
    };
}
Example 2: empty

import org.apache.lucene.util.Accountable; // import the required package/class

public static AtomicOrdinalsFieldData empty() {
    return new AbstractAtomicOrdinalsFieldData(DEFAULT_SCRIPT_FUNCTION) {

        @Override
        public long ramBytesUsed() {
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() {
        }

        @Override
        public RandomAccessOrds getOrdinalsValues() {
            return DocValues.emptySortedSet();
        }
    };
}
Example 3: put

import org.apache.lucene.util.Accountable; // import the required package/class

/** Sets the key to the value for the provided reader;
 *  if the key is already set then this doesn't change it. */
public void put(AtomicReader reader, CacheKey key, Accountable value) {
    final Object readerKey = reader.getCoreCacheKey();
    synchronized (readerCache) {
        Map<CacheKey, Accountable> innerCache = readerCache.get(readerKey);
        if (innerCache == null) {
            // First time this reader is using FieldCache
            innerCache = new HashMap<>();
            readerCache.put(readerKey, innerCache);
            wrapper.initReader(reader);
        }
        if (innerCache.get(key) == null) {
            innerCache.put(key, value);
        } else {
            // Another thread beat us to it; leave the current value in place.
        }
    }
}
Example 4: readRamTree

import org.apache.lucene.util.Accountable; // import the required package/class

Accountable readRamTree(StreamInput in) throws IOException {
    final String name = in.readString();
    final long bytes = in.readVLong();
    int numChildren = in.readVInt();
    if (numChildren == 0) {
        return Accountables.namedAccountable(name, bytes);
    }
    List<Accountable> children = new ArrayList<>(numChildren);
    while (numChildren-- > 0) {
        children.add(readRamTree(in));
    }
    return Accountables.namedAccountable(name, children, bytes);
}
Example 5: writeRamTree

import org.apache.lucene.util.Accountable; // import the required package/class

void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
    out.writeString(tree.toString());
    out.writeVLong(tree.ramBytesUsed());
    Collection<Accountable> children = tree.getChildResources();
    out.writeVInt(children.size());
    for (Accountable child : children) {
        writeRamTree(out, child);
    }
}
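Examples 4 and 5 are the two halves of a round trip for a tree of named Accountable nodes. The sketch below is independent of the Elasticsearch StreamInput/StreamOutput classes: it only shows how such a tree can be built with the Accountables.namedAccountable factory methods already used above, and rendered for inspection with Accountables.toString. The class name RamTreeDemo and the sizes are made up.

import java.util.Arrays;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;

public class RamTreeDemo { // hypothetical demo class
    public static void main(String[] args) {
        // Build a small tree: a parent node with two named children.
        Accountable ordinals = Accountables.namedAccountable("ordinals", 1024);
        Accountable terms = Accountables.namedAccountable("term bytes", 4096);
        Accountable root = Accountables.namedAccountable("field data", Arrays.asList(ordinals, terms), 5120);

        // Accountables.toString renders the tree with per-node sizes, which is handy
        // for checking what writeRamTree would serialize and readRamTree would rebuild.
        System.out.println(Accountables.toString(root));
    }
}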
Example 6: load

import org.apache.lucene.util.Accountable; // import the required package/class

@Override
public AtomicParentChildFieldData load(LeafReaderContext context) {
    final LeafReader reader = context.reader();
    return new AbstractAtomicParentChildFieldData() {

        @Override
        public Set<String> types() {
            return parentTypes;
        }

        @Override
        public SortedDocValues getOrdinalsValues(String type) {
            try {
                return DocValues.getSorted(reader, ParentFieldMapper.joinField(type));
            } catch (IOException e) {
                throw new IllegalStateException("cannot load join doc values field for type [" + type + "]", e);
            }
        }

        @Override
        public long ramBytesUsed() {
            // unknown
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() throws ElasticsearchException {
        }
    };
}
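A quick usage sketch for Example 6, assuming the fieldData object returned by load and the reader from its LeafReaderContext; the type name "my_parent_type" is made up. It reads the per-document parent ids through the random-access SortedDocValues API (getOrd/lookupOrd) of the Lucene generation these examples target.

// Hedged usage sketch, not part of the original source.
SortedDocValues ords = fieldData.getOrdinalsValues("my_parent_type");
for (int docId = 0; docId < reader.maxDoc(); docId++) {
    int ord = ords.getOrd(docId);
    if (ord >= 0) {                      // -1 means the document has no value
        BytesRef parentId = ords.lookupOrd(ord);
        // ... use parentId.utf8ToString() as needed ...
    }
}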
Example 7: getChildResources

import org.apache.lucene.util.Accountable; // import the required package/class

@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    resources.add(Accountables.namedAccountable("ordinals", ordinals));
    resources.add(Accountables.namedAccountable("term bytes", bytes));
    resources.add(Accountables.namedAccountable("term offsets", termOrdToBytesOffset));
    return Collections.unmodifiableList(resources);
}
Example 8: empty

import org.apache.lucene.util.Accountable; // import the required package/class

public static AtomicNumericFieldData empty(final int maxDoc) {
    return new AtomicDoubleFieldData(0) {

        @Override
        public SortedNumericDoubleValues getDoubleValues() {
            return FieldData.emptySortedNumericDoubles(maxDoc);
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }
    };
}
Example 9: empty

import org.apache.lucene.util.Accountable; // import the required package/class

public static AtomicParentChildFieldData empty() {
    return new AbstractAtomicParentChildFieldData() {

        @Override
        public long ramBytesUsed() {
            return 0;
        }

        @Override
        public Collection<Accountable> getChildResources() {
            return Collections.emptyList();
        }

        @Override
        public void close() {
        }

        @Override
        public SortedDocValues getOrdinalsValues(String type) {
            return DocValues.emptySorted();
        }

        @Override
        public Set<String> types() {
            return emptySet();
        }
    };
}
Example 10: toXContent

import org.apache.lucene.util.Accountable; // import the required package/class

static void toXContent(XContentBuilder builder, Accountable tree) throws IOException {
    builder.startObject();
    builder.field(Fields.DESCRIPTION, tree.toString());
    builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed()));
    Collection<Accountable> children = tree.getChildResources();
    if (children.isEmpty() == false) {
        builder.startArray(Fields.CHILDREN);
        for (Accountable child : children) {
            toXContent(builder, child);
        }
        builder.endArray();
    }
    builder.endObject();
}
Example 11: onCache

import org.apache.lucene.util.Accountable; // import the required package/class

@Override
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
    if (shardId != null) {
        final IndexShard shard = indexService.getShardOrNull(shardId.id());
        if (shard != null) {
            shard.fieldData().onCache(shardId, fieldName, ramUsage);
        }
    }
}
Example 12: onRemoval

import org.apache.lucene.util.Accountable; // import the required package/class

public void onRemoval(Accountable key, Accountable value, boolean evicted) {
    if (evicted) {
        evictionsMetric.inc();
    }
    long dec = 0;
    if (key != null) {
        dec += key.ramBytesUsed();
    }
    if (value != null) {
        dec += value.ramBytesUsed();
    }
    totalMetric.dec(dec);
}
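The removal listener above decrements the cache's RAM counters with the sizes reported by the key and value themselves. To illustrate where such numbers can come from, here is a hypothetical Accountable cache key (it is not the actual Elasticsearch Key class); it combines RamUsageEstimator.shallowSizeOf with a rough per-character estimate for the field name it holds.

import java.util.Collection;
import java.util.Collections;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;

// Hypothetical cache key, for illustration only.
class FieldCacheKey implements Accountable {
    private final String fieldName;

    FieldCacheKey(String fieldName) {
        this.fieldName = fieldName;
    }

    @Override
    public long ramBytesUsed() {
        // shallowSizeOf covers the object header and references; the String's
        // character data is approximated at two bytes per char.
        return RamUsageEstimator.shallowSizeOf(this) + 2L * fieldName.length();
    }

    @Override
    public Collection<Accountable> getChildResources() {
        return Collections.emptyList();
    }
}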
Example 13: testGlobalOrdinalsGetRemovedOnceIndexReaderCloses

import org.apache.lucene.util.Accountable; // import the required package/class

public void testGlobalOrdinalsGetRemovedOnceIndexReaderCloses() throws Exception {
    fillExtendedMvSet();
    refreshReader();
    IndexOrdinalsFieldData ifd = getForField("string", "value", hasDocValues());
    IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
    assertThat(ifd.loadGlobal(topLevelReader), sameInstance(globalOrdinals));
    // 4 = 3 segment-level caches + 1 top-level cache
    // in case of doc values, we don't cache atomic FD, so only the top-level cache is there
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 1L : 4L));
    IndexOrdinalsFieldData cachedInstance = null;
    for (Accountable ramUsage : indicesFieldDataCache.getCache().values()) {
        if (ramUsage instanceof IndexOrdinalsFieldData) {
            cachedInstance = (IndexOrdinalsFieldData) ramUsage;
            break;
        }
    }
    assertThat(cachedInstance, sameInstance(globalOrdinals));
    topLevelReader.close();
    // Now only the 3 segment-level entries remain: only the top-level reader has been closed,
    // but the segment readers are still used by the IndexWriter.
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 0L : 3L));
    refreshReader();
    assertThat(ifd.loadGlobal(topLevelReader), not(sameInstance(globalOrdinals)));
    ifdService.clear();
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(0L));
}
Example 14: CacheEntry

import org.apache.lucene.util.Accountable; // import the required package/class

public CacheEntry(Object readerKey, String fieldName,
                  Class<?> cacheType,
                  Object custom,
                  Accountable value) {
    this.readerKey = readerKey;
    this.fieldName = fieldName;
    this.cacheType = cacheType;
    this.custom = custom;
    this.value = value;
}
Example 15: IndicesFieldDataCache

import org.apache.lucene.util.Accountable; // import the required package/class

@Inject
public IndicesFieldDataCache(Settings settings, IndicesFieldDataCacheListener indicesFieldDataCacheListener, ThreadPool threadPool) {
    super(settings);
    this.threadPool = threadPool;
    this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
    final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1");
    final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes();
    CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.newBuilder()
            .removalListener(this);
    if (sizeInBytes > 0) {
        cacheBuilder.maximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
    }
    // defaults to 4, but this is a busy map for all indices, so increase it a bit by default
    final int concurrencyLevel = settings.getAsInt(FIELDDATA_CACHE_CONCURRENCY_LEVEL, 16);
    if (concurrencyLevel <= 0) {
        throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
    }
    cacheBuilder.concurrencyLevel(concurrencyLevel);
    logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes));
    cache = cacheBuilder.build();
    this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1));
    // Start a task that periodically cleans the field data cache.
    threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME,
            new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
}
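Example 15 ties the cache's maximum weight to RAM usage through a FieldDataWeigher, whose source is not shown above. A plausible sketch of such a weigher follows, assuming Guava's Weigher interface (which is what this CacheBuilder usage suggests) and using Accountable for both key and value only to keep the snippet self-contained; it is an illustration, not the actual FieldDataWeigher implementation.

import com.google.common.cache.Weigher;
import org.apache.lucene.util.Accountable;

// Sketch of a weigher that charges each entry its reported RAM usage.
class AccountableWeigher implements Weigher<Accountable, Accountable> {
    @Override
    public int weigh(Accountable key, Accountable value) {
        long bytes = key.ramBytesUsed() + value.ramBytesUsed();
        // Guava weights are ints; clamp large values and never return zero,
        // so every entry contributes to the maximum weight.
        int weight = (int) Math.min(bytes, Integer.MAX_VALUE);
        return weight == 0 ? 1 : weight;
    }
}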