This article collects typical usage examples of the Java class com.carrotsearch.hppc.IntHashSet. If you have been wondering what IntHashSet is for, or how it is used in practice, the selected examples below should help.
IntHashSet belongs to the com.carrotsearch.hppc package. 15 code examples of the class are shown below, ordered roughly by popularity.
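Before the extracted examples, a minimal, self-contained sketch of the core IntHashSet operations (add, contains, size, cursor iteration) may be useful. The class name and printed values are illustrative only and do not come from any of the projects below.

import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.cursors.IntCursor;

public class IntHashSetBasics {
    public static void main(String[] args) {
        IntHashSet ids = new IntHashSet();    // a hash set of primitive ints, no boxing
        ids.add(1);
        ids.add(2);
        ids.add(2);                           // duplicate: add() returns false, the set is unchanged
        System.out.println(ids.size());       // 2
        System.out.println(ids.contains(3));  // false
        for (IntCursor cursor : ids) {        // iteration yields cursors, not Integer objects
            System.out.println(cursor.value);
        }
    }
}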
Example 1: testAccuracy
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public void testAccuracy() {
    final long bucket = randomInt(20);
    final int numValues = randomIntBetween(1, 100000);
    final int maxValue = randomIntBetween(1, randomBoolean() ? 1000 : 100000);
    final int p = randomIntBetween(14, MAX_PRECISION);
    IntHashSet set = new IntHashSet();
    HyperLogLogPlusPlus e = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 1);
    for (int i = 0; i < numValues; ++i) {
        final int n = randomInt(maxValue);
        set.add(n);
        final long hash = BitMixer.mix64(n);
        e.collect(bucket, hash);
        if (randomInt(100) == 0) {
            //System.out.println(e.cardinality(bucket) + " <> " + set.size());
            assertThat((double) e.cardinality(bucket), closeTo(set.size(), 0.1 * set.size()));
        }
    }
    assertThat((double) e.cardinality(bucket), closeTo(set.size(), 0.1 * set.size()));
}
Example 2: testKeybasedGraphPartitioning
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
@Test
public void testKeybasedGraphPartitioning() {
    Object[] options = {option(GraphDatabaseConfiguration.IDS_FLUSH), false,
            option(VertexIDAssigner.PLACEMENT_STRATEGY), PropertyPlacementStrategy.class.getName(),
            option(PropertyPlacementStrategy.PARTITION_KEY), "clusterId"};
    clopen(options);
    int[] groupDegrees = {5, 5, 5, 5, 5, 5, 5, 5};
    int numVertices = setupGroupClusters(groupDegrees, CommitMode.PER_VERTEX);
    IntSet partitionIds = new IntHashSet(numVertices); // to track the "spread" of partition ids
    for (int i = 0; i < groupDegrees.length; i++) {
        TitanVertex g = getOnlyVertex(tx.query().has("groupid", "group" + i));
        int partitionId = -1;
        for (TitanVertex v : g.query().direction(Direction.IN).labels("member").vertices()) {
            if (partitionId < 0) partitionId = getPartitionID(v);
            assertEquals(partitionId, getPartitionID(v));
            partitionIds.add(partitionId);
        }
    }
    assertTrue(partitionIds.size() > numPartitions / 2); // this is a probabilistic test that might fail
}
Example 3: testNewIndexRestored
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public void testNewIndexRestored() {
    MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT))
                    .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3)))
            .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
            .metaData(metaData)
            .routingTable(RoutingTable.builder()
                    .addAsNewRestore(metaData.index("test"),
                            new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
                                    Version.CURRENT, "test"),
                            new IntHashSet())
                    .build())
            .build();
    for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
        assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NEW_INDEX_RESTORED));
    }
}
Example 4: applyDeletedShards
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
private void applyDeletedShards(final ClusterChangedEvent event) {
    RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
    if (routingNode == null) {
        return;
    }
    IntHashSet newShardIds = new IntHashSet();
    for (IndexService indexService : indicesService) {
        String index = indexService.index().name();
        IndexMetaData indexMetaData = event.state().metaData().index(index);
        if (indexMetaData == null) {
            continue;
        }
        // now, go over and delete shards that need to get deleted
        newShardIds.clear();
        for (ShardRouting shard : routingNode) {
            if (shard.index().equals(index)) {
                newShardIds.add(shard.id());
            }
        }
        for (Integer existingShardId : indexService.shardIds()) {
            if (!newShardIds.contains(existingShardId)) {
                if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("[{}][{}] removing shard (index is closed)", index, existingShardId);
                    }
                    indexService.removeShard(existingShardId, "removing shard (index is closed)");
                } else {
                    // we can just remove the shard, without cleaning it locally, since we will clean it
                    // when all shards are allocated in the IndicesStore
                    if (logger.isDebugEnabled()) {
                        logger.debug("[{}][{}] removing shard (not allocated)", index, existingShardId);
                    }
                    indexService.removeShard(existingShardId, "removing shard (not allocated)");
                }
            }
        }
    }
}
Example 5: getEvalMode
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public static EvalMode getEvalMode(VectorAccessible incoming, LogicalExpression expr, IntHashSet transferFieldIds) {
    // add value vector to transfer if direct reference and this is allowed, otherwise, add to evaluation stack.
    final boolean canDirectTransfer =
            // the expression is a direct read.
            expr instanceof ValueVectorReadExpression
            // we aren't dealing with a selection vector.
            && incoming.getSchema().getSelectionVectorMode() == SelectionVectorMode.NONE
            // the field doesn't have a read path (e.g. a single value out of a list)
            && !((ValueVectorReadExpression) expr).hasReadPath()
            // we aren't already transferring the field.
            && !transferFieldIds.contains(((ValueVectorReadExpression) expr).getFieldId().getFieldIds()[0]);
    if (canDirectTransfer) {
        return EvalMode.DIRECT;
    }
    final boolean isComplex =
            expr instanceof FunctionHolderExpr
            && ((FunctionHolderExpr) expr).isComplexWriterFuncHolder();
    if (isComplex) {
        return EvalMode.COMPLEX;
    }
    return EvalMode.EVAL;
}
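A rough sketch of how a caller might use the transferFieldIds set above: once DIRECT mode is chosen, the field id is recorded so that the contains() check rejects a second transfer of the same field. The surrounding projection logic here is an assumption for illustration, not code from the original project.

if (getEvalMode(incoming, expr, transferFieldIds) == EvalMode.DIRECT) {
    // remember this field so later expressions cannot claim the same direct transfer
    transferFieldIds.add(((ValueVectorReadExpression) expr).getFieldId().getFieldIds()[0]);
}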
Example 6: testPartitionSpread
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
private void testPartitionSpread(boolean flush, boolean batchCommit) {
    Object[] options = {option(GraphDatabaseConfiguration.IDS_FLUSH), flush};
    clopen(options);
    int[] groupDegrees = {10, 15, 10, 17, 10, 4, 7, 20, 11};
    int numVertices = setupGroupClusters(groupDegrees, batchCommit ? CommitMode.BATCH : CommitMode.PER_VERTEX);
    IntSet partitionIds = new IntHashSet(numVertices); // to track the "spread" of partition ids
    for (int i = 0; i < groupDegrees.length; i++) {
        TitanVertex g = getOnlyVertex(tx.query().has("groupid", "group" + i));
        assertCount(groupDegrees[i], g.edges(Direction.OUT, "contain"));
        assertCount(groupDegrees[i], g.edges(Direction.IN, "member"));
        assertCount(groupDegrees[i], g.query().direction(Direction.OUT).edges());
        assertCount(groupDegrees[i], g.query().direction(Direction.IN).edges());
        assertCount(groupDegrees[i] * 2, g.query().edges());
        for (TitanVertex v : g.query().direction(Direction.IN).labels("member").vertices()) {
            int pid = getPartitionID(v);
            partitionIds.add(pid);
            assertEquals(g, getOnlyElement(v.query().direction(Direction.OUT).labels("member").vertices()));
            VertexList vlist = v.query().direction(Direction.IN).labels("contain").vertexIds();
            assertEquals(1, vlist.size());
            assertEquals(pid, idManager.getPartitionId(vlist.getID(0)));
            assertEquals(g, vlist.get(0));
        }
    }
    if (flush || !batchCommit) { // in these cases we would expect significant spread across partitions
        assertTrue(partitionIds.size() > numPartitions / 2); // this is a probabilistic test that might fail
    } else {
        assertEquals(1, partitionIds.size()); // no spread in this case
    }
}
Example 7: toIntSet
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public static IntSet toIntSet(final byte[] bytes) {
    IntSet bits = new IntHashSet(bytes == null ? 0 : DEFAULT_EXPECTED_ELEMENTS);
    if (bytes != null && bytes.length > 0) {
        for (int i = 0; i < (bytes.length * 8); i++) {
            if ((bytes[i / 8] & (1 << (7 - (i % 8)))) != 0) {
                bits.add(i);
            }
        }
    }
    return bits;
}
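For context, a possible inverse of the conversion above, packing an IntSet of bit positions back into a byte[] with the same MSB-first layout. This helper is not part of the original class; the method name and the numBytes parameter are made up for illustration.

public static byte[] toBytes(IntSet bits, int numBytes) {
    byte[] out = new byte[numBytes];
    for (IntCursor cursor : bits) {
        int i = cursor.value;
        out[i / 8] |= (byte) (1 << (7 - (i % 8))); // set bit i, most significant bit first within each byte
    }
    return out;
}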
Example 8: MemoryUsageCollector
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
@SuppressWarnings("unchecked")
private MemoryUsageCollector(Instrumentation instrumentation, Object targetObject, ClassLoader targetClassloader,
Deque stack, IntHashSet countedObjectSet,
boolean traverseClassLoaderClasses) {
this.instrumentation = instrumentation;
this.targetObject = targetObject;
this.targetClassloader = targetClassloader;
this.traverseClassLoaderClasses = traverseClassLoaderClasses;
this.stack = stack;
this.countedObjectSet = countedObjectSet;
this.memoryUsageSnapshot = new MemoryUsageSnapshot(targetObject, targetClassloader);
}
Example 9: main
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public static void main(String[] args) {
    final long start = System.currentTimeMillis();
    final IntHashSet a = new com.carrotsearch.hppc.IntHashSet();
    for (int i = 10000000; i-- != 0; ) a.add(i);
    IntHashSet b = new com.carrotsearch.hppc.IntHashSet(a.size());
    b.addAll(a);
    b = new com.carrotsearch.hppc.IntHashSet();
    b.addAll(a);
    final long time = System.currentTimeMillis() - start;
    System.out.println(time / 1000.0);
    System.out.println(b.size());
}
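The two copies above appear to contrast a pre-sized IntHashSet with a default-sized one; read that way, the difference being timed boils down to the following sketch (not part of the original benchmark):

IntHashSet presized = new IntHashSet(10_000_000); // capacity hint: buffers sized once, no rehashing during addAll
IntHashSet growing  = new IntHashSet();           // default capacity: grows and rehashes repeatedly as elements arrive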
Example 10: create
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
private TestContext create(SearchType... searchTypes) throws Exception {
    assertAcked(prepareCreate("index").addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
            .startObject("field1")
                .field("type", "long")
            .endObject()
            .startObject("field2")
                .field("type", "keyword")
            .endObject()
            .startObject("nested")
                .field("type", "nested")
                .startObject("properties")
                    .startObject("field3")
                        .field("type", "long")
                    .endObject()
                    .startObject("field4")
                        .field("type", "keyword")
                    .endObject()
                .endObject()
            .endObject()
            .endObject().endObject().endObject()));
    int numDocs = 2 + randomInt(512);
    int scrollRequestSize = randomIntBetween(1, rarely() ? numDocs : numDocs / 2);
    boolean unevenRouting = randomBoolean();
    int numMissingDocs = scaledRandomIntBetween(0, numDocs / 100);
    IntHashSet missingDocs = new IntHashSet(numMissingDocs);
    for (int i = 0; i < numMissingDocs; i++) {
        while (!missingDocs.add(randomInt(numDocs))) {}
    }
    for (int i = 1; i <= numDocs; i++) {
        IndexRequestBuilder indexRequestBuilder = client()
                .prepareIndex("index", "type", String.valueOf(i));
        if (missingDocs.contains(i)) {
            indexRequestBuilder.setSource("x", "y");
        } else {
            indexRequestBuilder.setSource(jsonBuilder().startObject()
                    .field("field1", i)
                    .field("field2", String.valueOf(i))
                    .startObject("nested")
                        .field("field3", i)
                        .field("field4", String.valueOf(i))
                    .endObject()
                    .endObject());
        }
        if (unevenRouting && randomInt(3) <= 2) {
            indexRequestBuilder.setRouting("a");
        }
        indexRandom(false, indexRequestBuilder);
    }
    refresh();
    final SortBuilder sort;
    if (randomBoolean()) {
        if (randomBoolean()) {
            sort = SortBuilders.fieldSort("field1").missing(1);
        } else {
            sort = SortBuilders.fieldSort("field2").missing("1");
        }
    } else {
        if (randomBoolean()) {
            sort = SortBuilders.fieldSort("nested.field3").missing(1);
        } else {
            sort = SortBuilders.fieldSort("nested.field4").missing("1");
        }
    }
    sort.order(randomBoolean() ? SortOrder.ASC : SortOrder.DESC);
    SearchType searchType = RandomPicks.randomFrom(random(), Arrays.asList(searchTypes));
    logger.info("numDocs={}, scrollRequestSize={}, sort={}, searchType={}", numDocs, scrollRequestSize, sort, searchType);
    return new TestContext(numDocs, scrollRequestSize, sort, searchType);
}
Example 11: createRecoveryStateAndInitalizeAllocations
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
private ClusterState createRecoveryStateAndInitalizeAllocations(MetaData metaData, TestGatewayAllocator gatewayAllocator) {
    DiscoveryNode node1 = newNode("node1");
    MetaData.Builder metaDataBuilder = new MetaData.Builder(metaData);
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
        Index index = cursor.value.getIndex();
        IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(cursor.value);
        final int recoveryType = randomInt(5);
        if (recoveryType <= 4) {
            addInSyncAllocationIds(index, indexMetaDataBuilder, gatewayAllocator, node1);
        }
        IndexMetaData indexMetaData = indexMetaDataBuilder.build();
        metaDataBuilder.put(indexMetaData, false);
        switch (recoveryType) {
            case 0:
                routingTableBuilder.addAsRecovery(indexMetaData);
                break;
            case 1:
                routingTableBuilder.addAsFromCloseToOpen(indexMetaData);
                break;
            case 2:
                routingTableBuilder.addAsFromDangling(indexMetaData);
                break;
            case 3:
                routingTableBuilder.addAsNewRestore(indexMetaData,
                        new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT,
                                indexMetaData.getIndex().getName()), new IntHashSet());
                break;
            case 4:
                routingTableBuilder.addAsRestore(indexMetaData,
                        new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT,
                                indexMetaData.getIndex().getName()));
                break;
            case 5:
                routingTableBuilder.addAsNew(indexMetaData);
                break;
            default:
                throw new IndexOutOfBoundsException();
        }
    }
    return ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
            .nodes(DiscoveryNodes.builder().add(node1))
            .metaData(metaDataBuilder.build())
            .routingTable(routingTableBuilder.build()).build();
}
Example 12: testRestoreIndexWithShardsMissingInLocalGateway
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception {
    logger.info("--> start 2 nodes");
    Settings nodeSettings = Settings.builder()
            .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
            .build();
    internalCluster().startNode(nodeSettings);
    internalCluster().startNode(nodeSettings);
    cluster().wipeIndices("_all");
    logger.info("--> create repository");
    PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).execute().actionGet();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
    int numberOfShards = 6;
    logger.info("--> create an index that will have some unallocated shards");
    assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", numberOfShards)
            .put("number_of_replicas", 0)));
    ensureGreen();
    logger.info("--> indexing some data into test-idx");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    logger.info("--> start snapshot");
    assertThat(client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setIndices("test-idx")
            .setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
    logger.info("--> close the index");
    assertAcked(client().admin().indices().prepareClose("test-idx"));
    logger.info("--> shutdown one of the nodes that should make half of the shards unavailable");
    internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
        @Override
        public boolean clearData(String nodeName) {
            return true;
        }
    });
    assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m")
            .setWaitForNodes("2").execute().actionGet().isTimedOut(), equalTo(false));
    logger.info("--> restore index snapshot");
    assertThat(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1").setRestoreGlobalState(false)
            .setWaitForCompletion(true).get().getRestoreInfo().successfulShards(), equalTo(6));
    ensureGreen("test-idx");
    assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    IntSet reusedShards = new IntHashSet();
    for (RecoveryState recoveryState : client().admin().indices().prepareRecoveries("test-idx").get()
            .shardRecoveryStates().get("test-idx")) {
        if (recoveryState.getIndex().reusedBytes() > 0) {
            reusedShards.add(recoveryState.getShardId().getId());
        }
    }
    logger.info("--> check that at least half of the shards had some reuse: [{}]", reusedShards);
    assertThat(reusedShards.size(), greaterThanOrEqualTo(numberOfShards / 2));
}
Example 13: getObjectSet
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public IntHashSet getObjectSet() {
    return objectSet;
}
Example 14: toIntSet
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
public static Collector<Integer, ?, IntHashSet> toIntSet() {
    return new CollectorImpl<>(IntHashSet::new, IntHashSet::add, (left, right) -> {
        left.addAll(right);
        return left;
    }, CH_ID);
}
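The CollectorImpl and CH_ID used above are internals of the defining class (they appear to mirror the JDK's private Collectors implementation), so they are not available to outside code. A self-contained equivalent, plus a small usage example, can be written with the public Collector.of factory; everything below is an illustration, not the original project's API.

import com.carrotsearch.hppc.IntHashSet;
import java.util.stream.Collector;
import java.util.stream.Stream;

class ToIntSetDemo {
    public static void main(String[] args) {
        Collector<Integer, IntHashSet, IntHashSet> toIntSet = Collector.of(
                IntHashSet::new,                                        // supplier: empty set
                IntHashSet::add,                                        // accumulator: unboxes and adds each element
                (left, right) -> { left.addAll(right); return left; }); // combiner for parallel streams
        IntHashSet ids = Stream.of(1, 2, 2, 3).collect(toIntSet);
        System.out.println(ids.size()); // 3 -- the duplicate collapses
    }
}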
Example 15: dirichletMultinomialLikelihoodRatio
import com.carrotsearch.hppc.IntHashSet; // import the required package/class
/** What is the probability that these two observations were drawn from
 *  the same multinomial with symmetric Dirichlet prior alpha, relative
 *  to the probability that they were drawn from different multinomials
 *  both drawn from this Dirichlet?
 */
public static double dirichletMultinomialLikelihoodRatio(IntIntHashMap countsX,
                                                         IntIntHashMap countsY,
                                                         double alpha, double alphaSum) {
    // The likelihood for one DCM is
    //
    //      Gamma( alpha_sum )        prod Gamma( alpha + N_i )
    //    ----------------------  *  ---------------------------
    //      prod Gamma( alpha )       Gamma( alpha_sum + N )
    //
    // When we divide this by the product of two other DCMs with the same
    // alpha parameter, the first term in the numerator cancels with the
    // first term in the denominator. Then moving the remaining alpha-only
    // term to the numerator, we get
    //
    //        prod Gamma(alpha)       prod Gamma( alpha + X_i + Y_i )
    //       -------------------  *  ----------------------------------
    //        Gamma(alpha_sum)        Gamma( alpha_sum + X_sum + Y_sum )
    //    -----------------------------------------------------------------
    //      prod Gamma(alpha + X_i)       prod Gamma(alpha + Y_i)
    //     --------------------------  *  ----------------------------
    //      Gamma( alpha_sum + X_sum )     Gamma( alpha_sum + Y_sum )
    double logLikelihood = 0.0;
    double logGammaAlpha = logGamma(alpha);
    int totalX = 0;
    int totalY = 0;
    int key, x, y;
    IntHashSet distinctKeys = new IntHashSet();
    distinctKeys.addAll(countsX.keys());
    distinctKeys.addAll(countsY.keys());
    for (IntCursor cursor : distinctKeys) {
        key = cursor.value;
        x = 0;
        if (countsX.containsKey(key)) {
            x = countsX.get(key);
        }
        y = 0;
        if (countsY.containsKey(key)) {
            y = countsY.get(key);
        }
        totalX += x;
        totalY += y;
        logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
                - logGamma(alpha + x) - logGamma(alpha + y);
    }
    logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
            - logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);
    return logLikelihood;
}
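Stripped of the Java, the quantity the method accumulates is the log of that likelihood ratio. With $X = \sum_i x_i$ and $Y = \sum_i y_i$, the per-key loop plus the final correction compute

\log \mathrm{LR} = \sum_i \bigl[ \log\Gamma(\alpha) + \log\Gamma(\alpha + x_i + y_i) - \log\Gamma(\alpha + x_i) - \log\Gamma(\alpha + y_i) \bigr]
  + \log\Gamma(\alpha_{\text{sum}} + X) + \log\Gamma(\alpha_{\text{sum}} + Y) - \log\Gamma(\alpha_{\text{sum}}) - \log\Gamma(\alpha_{\text{sum}} + X + Y)

where the IntHashSet of distinct keys ensures each term is added exactly once even when a key appears in both count maps.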