This article collects typical usage examples of the Java class com.carrotsearch.hppc.ObjectLongMap. If you are wondering what ObjectLongMap is for, how to use it, or where to find usage examples of it, the curated class code examples here may help.
The ObjectLongMap class belongs to the com.carrotsearch.hppc package. Seven code examples of the ObjectLongMap class are shown below, sorted by popularity by default.
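Before the project examples, here is a minimal, self-contained sketch of the basic API that the examples rely on: put, get, containsKey, size, and iteration via ObjectLongCursor. The word-count scenario and the class name ObjectLongMapSketch are illustrative only and are not taken from the projects quoted below.

import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;

public class ObjectLongMapSketch {
    public static void main(String[] args) {
        // ObjectLongHashMap is the usual ObjectLongMap implementation; values are primitive longs, so no boxing
        ObjectLongMap<String> counts = new ObjectLongHashMap<>();
        for (String word : new String[]{"foo", "bar", "foo"}) {
            if (counts.containsKey(word)) {
                counts.put(word, counts.get(word) + 1);
            } else {
                counts.put(word, 1L);
            }
        }
        // iterating an ObjectLongMap yields ObjectLongCursor objects with public key/value fields
        for (ObjectLongCursor<String> cursor : counts) {
            System.out.println(cursor.key + " -> " + cursor.value);
        }
        System.out.println("distinct words: " + counts.size());
    }
}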
Example 1: MatchingNodes
import com.carrotsearch.hppc.ObjectLongMap; // import the required package/class
MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize, @Nullable Map<String, NodeAllocationResult> nodeDecisions) {
    this.nodesToSize = nodesToSize;
    this.nodeDecisions = nodeDecisions;

    // remember the node whose store has the largest number of matching bytes
    long highestMatchSize = 0;
    DiscoveryNode highestMatchNode = null;
    for (ObjectLongCursor<DiscoveryNode> cursor : nodesToSize) {
        if (cursor.value > highestMatchSize) {
            highestMatchSize = cursor.value;
            highestMatchNode = cursor.key;
        }
    }
    this.nodeWithHighestMatch = highestMatchNode;
}
Example 2: testDuell
import com.carrotsearch.hppc.ObjectLongMap; // import the required package/class
public void testDuell() {
    final int len = randomIntBetween(1, 100000);
    final BytesRef[] values = new BytesRef[len];
    for (int i = 0; i < values.length; ++i) {
        values[i] = new BytesRef(randomAsciiOfLength(5));
    }
    // valueToId is the reference mapping that the hash under test must agree with
    final ObjectLongMap<BytesRef> valueToId = new ObjectLongHashMap<>();
    final BytesRef[] idToValue = new BytesRef[values.length];
    final int iters = randomInt(1000000);
    for (int i = 0; i < iters; ++i) {
        final BytesRef value = randomFrom(values);
        if (valueToId.containsKey(value)) {
            assertEquals(-1 - valueToId.get(value), hash.add(value, value.hashCode()));
        } else {
            assertEquals(valueToId.size(), hash.add(value, value.hashCode()));
            idToValue[valueToId.size()] = value;
            valueToId.put(value, valueToId.size());
        }
    }
    assertEquals(valueToId.size(), hash.size());

    for (Iterator<ObjectLongCursor<BytesRef>> iterator = valueToId.iterator(); iterator.hasNext(); ) {
        final ObjectLongCursor<BytesRef> next = iterator.next();
        assertEquals(next.value, hash.find(next.key, next.key.hashCode()));
    }

    for (long i = 0; i < hash.capacity(); ++i) {
        final long id = hash.id(i);
        BytesRef spare = new BytesRef();
        if (id >= 0) {
            hash.get(id, spare);
            assertEquals(idToValue[(int) id], spare);
        }
    }
    hash.close();
}
Example 3: statsFromResponse
import com.carrotsearch.hppc.ObjectLongMap; // import the required package/class
private static ObjectLongMap<TableIdent> statsFromResponse(SQLResponse sqlResponse) {
    ObjectLongMap<TableIdent> newStats = new ObjectLongHashMap<>((int) sqlResponse.rowCount());
    for (Object[] row : sqlResponse.rows()) {
        // row[0] is the long value, row[1] and row[2] identify the table
        newStats.put(new TableIdent((String) row[1], (String) row[2]), (long) row[0]);
    }
    return newStats;
}
Example 4: numDocs
import com.carrotsearch.hppc.ObjectLongMap; // import the required package/class
/**
 * Returns the number of docs a table has.
 *
 * <p>
 * The returned number isn't an accurate real-time value but a cached value that is periodically updated.
 * </p>
 * Returns -1 if the table isn't in the cache.
 */
public long numDocs(TableIdent tableIdent) {
    ObjectLongMap<TableIdent> stats = tableStats;
    if (stats == null) {
        // lazily populate the cache on first access
        stats = statsFromResponse(transportSQLAction.get().execute(REQUEST).actionGet(30, TimeUnit.SECONDS));
        tableStats = stats;
    }
    if (stats.containsKey(tableIdent)) {
        return stats.get(tableIdent);
    }
    return -1;
}
Example 5: MatchingNodes
import com.carrotsearch.hppc.ObjectLongMap; // import the required package/class
public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize) {
    this.nodesToSize = nodesToSize;

    // remember the node whose store has the largest number of matching bytes
    long highestMatchSize = 0;
    DiscoveryNode highestMatchNode = null;
    for (ObjectLongCursor<DiscoveryNode> cursor : nodesToSize) {
        if (cursor.value > highestMatchSize) {
            highestMatchSize = cursor.value;
            highestMatchNode = cursor.key;
        }
    }
    this.nodeWithHighestMatch = highestMatchNode;
}
Example 6: findMatchingNodes
import com.carrotsearch.hppc.ObjectLongMap; // import the required package/class
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
                                        TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
                                        AsyncShardFetch.FetchResult<NodeStoreFilesMetaData> data,
                                        boolean explain) {
    ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
    Map<String, NodeAllocationResult> nodeDecisions = explain ? new HashMap<>() : null;
    for (Map.Entry<DiscoveryNode, NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
        DiscoveryNode discoNode = nodeStoreEntry.getKey();
        TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
        // we don't have any files at all, it is an empty index
        if (storeFilesMetaData.isEmpty()) {
            continue;
        }

        RoutingNode node = allocation.routingNodes().node(discoNode.getId());
        if (node == null) {
            continue;
        }

        // check if we can allocate on that node...
        // we only check for NO, since if this node is THROTTLING and it has enough "same data"
        // then we will try and assign it next time
        Decision decision = allocation.deciders().canAllocate(shard, node, allocation);

        long matchingBytes = -1;
        if (explain) {
            matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
            ShardStoreInfo shardStoreInfo = new ShardStoreInfo(matchingBytes);
            nodeDecisions.put(node.nodeId(), new NodeAllocationResult(discoNode, shardStoreInfo, decision));
        }

        if (decision.type() == Decision.Type.NO) {
            continue;
        }

        if (matchingBytes < 0) {
            matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
        }
        nodesToSize.put(discoNode, matchingBytes);
        if (logger.isTraceEnabled()) {
            if (matchingBytes == Long.MAX_VALUE) {
                logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.getName(), storeFilesMetaData.syncId());
            } else {
                logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data",
                    shard, discoNode.getName(), new ByteSizeValue(matchingBytes), matchingBytes);
            }
        }
    }

    return new MatchingNodes(nodesToSize, nodeDecisions);
}
Example 7: findMatchingNodes
import com.carrotsearch.hppc.ObjectLongMap; // import the required package/class
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
                                        TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
                                        AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
    ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
    for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
        DiscoveryNode discoNode = nodeStoreEntry.getKey();
        TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
        if (storeFilesMetaData == null) {
            // already allocated on that node...
            continue;
        }

        RoutingNode node = allocation.routingNodes().node(discoNode.id());
        if (node == null) {
            continue;
        }

        // check if we can allocate on that node...
        // we only check for NO, since if this node is THROTTLING and it has enough "same data"
        // then we will try and assign it next time
        Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
        if (decision.type() == Decision.Type.NO) {
            continue;
        }

        // if it is already allocated, we can't assign to it... (and it might be primary as well)
        if (storeFilesMetaData.allocated()) {
            continue;
        }

        // we don't have any files at all, it is an empty index
        if (storeFilesMetaData.iterator().hasNext() == false) {
            continue;
        }

        String primarySyncId = primaryStore.syncId();
        String replicaSyncId = storeFilesMetaData.syncId();
        // see if we have a sync id we can make use of
        if (replicaSyncId != null && replicaSyncId.equals(primarySyncId)) {
            logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.name(), replicaSyncId);
            nodesToSize.put(discoNode, Long.MAX_VALUE);
        } else {
            long sizeMatched = 0;
            for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
                String metaDataFileName = storeFileMetaData.name();
                if (primaryStore.fileExists(metaDataFileName) && primaryStore.file(metaDataFileName).isSame(storeFileMetaData)) {
                    sizeMatched += storeFileMetaData.length();
                }
            }
            logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data",
                shard, discoNode.name(), new ByteSizeValue(sizeMatched), sizeMatched);
            nodesToSize.put(discoNode, sizeMatched);
        }
    }

    return new MatchingNodes(nodesToSize);
}