本文整理汇总了Java中com.carrotsearch.hppc.IntOpenHashSet类的典型用法代码示例。如果您正苦于以下问题:Java IntOpenHashSet类的具体用法?Java IntOpenHashSet怎么用?Java IntOpenHashSet使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
IntOpenHashSet类属于com.carrotsearch.hppc包,在下文中一共展示了IntOpenHashSet类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: GroupExpandCollector
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Creates one top-docs collector per group marked in {@code groupBits} and
 * records the group-head doc id -> collector mapping.
 *
 * @param docValues    per-document group values, kept for later collection
 * @param groupBits    bit set whose set bits are the group-head doc ids
 * @param collapsedSet doc ids already collapsed; its size pre-sizes the map
 * @param limit        maximum docs to collect per group
 * @param sort         optional sort; when null, groups are ranked by score
 * @throws IOException if iterating {@code groupBits} fails
 */
public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntOpenHashSet collapsedSet, int limit, Sort sort) throws IOException {
    collectors = new ArrayList<>();
    groups = new IntObjectOpenHashMap<>(collapsedSet.size() * 2);
    DocIdSetIterator groupIterator = groupBits.iterator();
    for (int groupHead = groupIterator.nextDoc();
         groupHead != DocIdSetIterator.NO_MORE_DOCS;
         groupHead = groupIterator.nextDoc()) {
        // Score-ordered collector unless an explicit sort was requested.
        Collector perGroup = (sort != null)
                ? TopFieldCollector.create(sort, limit, false, false, false, true)
                : TopScoreDocCollector.create(limit, true);
        groups.put(groupHead, perGroup);
        collectors.add(perGroup);
    }
    this.collapsedSet = collapsedSet;
    this.groupBits = groupBits;
    this.docValues = docValues;
}
示例2: createUriClusterMapping
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Builds, for every entity URI, an array counting how many of its entities
 * occur in each cluster (index = cluster position in {@code clusters}).
 *
 * @param entityIdUriMapping maps an entity id to its URI
 * @param clusters           entity-id sets, one per cluster
 * @return URI -> per-cluster occurrence counts (array length = clusters.length)
 */
protected static Map<String, int[]> createUriClusterMapping(String[] entityIdUriMapping, IntOpenHashSet[] clusters) {
    Map<String, int[]> uriToCounts = new HashMap<String, int[]>();
    for (int clusterId = 0; clusterId < clusters.length; ++clusterId) {
        IntOpenHashSet cluster = clusters[clusterId];
        // Walk the HPPC open-addressed table directly: a slot holds a live
        // entity id only where the allocated flag is set.
        for (int slot = 0; slot < cluster.allocated.length; ++slot) {
            if (!cluster.allocated[slot]) {
                continue;
            }
            String uri = entityIdUriMapping[cluster.keys[slot]];
            int[] counts = uriToCounts.get(uri);
            if (counts == null) {
                counts = new int[clusters.length];
                uriToCounts.put(uri, counts);
            }
            ++counts[clusterId];
        }
    }
    return uriToCounts;
}
示例3: find
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Bron–Kerbosch-style recursive search for the best-weight clique.
 * {@code toCheck} holds vertices that may still extend the candidate;
 * {@code toExclude} holds vertices already fully explored. When both are
 * empty the candidate is maximal and is compared against the current best.
 *
 * @param candidate the clique built so far
 * @param toCheck   candidate extension vertices (not modified here)
 * @param toExclude already-explored vertices (grows as vertices are retired)
 */
private void find(BronPartial candidate, IntOpenHashSet toCheck, IntOpenHashSet toExclude) {
    if (toCheck.isEmpty() && toExclude.isEmpty()) {
        // Maximal clique reached; keep it only if it beats the best so far.
        if (candidate.getWeight() > best.getWeight()) {
            best = candidate;
        }
        return;
    }
    // Work on a copy so the caller's toCheck stays intact during iteration.
    IntOpenHashSet remaining = SetUtils.copy(toCheck);
    for (IntCursor cursor : toCheck) {
        int vertex = cursor.value;
        BronPartial extended = candidate.tryWith(vertex, graph);
        if (extended == null) {
            continue; // vertex is not adjacent to every clique member
        }
        find(extended,
                graph.intersectionWithNeighbors(remaining, vertex),
                graph.intersectionWithNeighbors(toExclude, vertex));
        // Retire the vertex: no longer a candidate, now excluded.
        remaining.remove(vertex);
        toExclude.add(vertex);
    }
}
示例4: init
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Prepares the clique-search state and seeds the work queue with every
 * single-vertex clique of the graph.
 *
 * @param elements vertices of the graph to search
 * @param weigher  assigns a weight to each element
 */
private void init(Iterable<T> elements, Weigher<T> weigher) {
    g = new Graph<>(elements, weigher);
    // Expected number of distinct cliques is bounded by the edge count n*(n-1)/2.
    seenCliques = IntOpenHashSet.newInstanceWithExpectedSize(g.size() * (g.size() - 1) / 2);
    work = Queues.newArrayDeque(); // queue of partials to evaluate
    this.best = NaivePartial.nullInstance;
    lastWorkedSize = 0;
    if (log.isDebugEnabled()) {
        log.debug("Starting search for clique on graph size " + g.size());
    }
    // Seed: vertex i alone is a clique; membership is encoded as a bitmask.
    for (int i = 0; i < g.size(); i++) {
        addNewClique(1 << i, g.neighborsExcludingAsMask(i, 0), 0);
    }
}
示例5: update
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Rebuilds the flat adjacency matrix plus the array/set views of every
 * vertex's out- and all-neighbor collections, and recomputes the edge count.
 * Arrays are sorted only for small graphs (< 10000 vertices) to keep the
 * rebuild cheap on large inputs.
 */
final private void update() {
    edgeCount = 0;
    int n = table.size();
    adjArr = new boolean[n * n];
    boolean sortArrays = vertexCount() < 10000;
    for (int v : vertices) {
        Adjacency adj = table.get(v);

        int[] outgoing = adj.outSet.toArray();
        if (sortArrays)
            Arrays.sort(outgoing);
        adj.outArr = outgoing;
        // Mirror the out-edges into the row-major boolean matrix.
        for (int w : outgoing)
            adjArr[v * n + w] = true;
        adj.outSet = new IntOpenHashSet(outgoing.length, 0.5f);
        adj.outSet.add(outgoing);

        int[] all = adj.allSet.toArray();
        if (sortArrays)
            Arrays.sort(all);
        adj.allArr = all;
        adj.allSet = new IntOpenHashSet(all.length, 0.5f);
        adj.allSet.add(all);

        edgeCount += outgoing.length;
    }
}
示例6: update
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Freezes each vertex's adjacency into sorted array form, rebuilds compact
 * half-loaded hash sets from those arrays, and recomputes the edge count.
 */
final private void update() {
    edgeCount = 0;
    for (int vertex : vertices) {
        Adjacency adj = table.get(vertex);

        int[] outgoing = adj.outSet.toArray();
        Arrays.sort(outgoing);
        adj.outArr = outgoing;
        adj.outSet = new IntOpenHashSet(outgoing.length, 0.5f);
        adj.outSet.add(outgoing);

        int[] all = adj.allSet.toArray();
        Arrays.sort(all);
        adj.allArr = all;
        adj.allSet = new IntOpenHashSet(all.length, 0.5f);
        adj.allSet.add(all);

        edgeCount += outgoing.length;
    }
}
示例7: getDocumentsWithWordAsSet
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
@Override
public void getDocumentsWithWordAsSet(String word, IntOpenHashSet documents) {
DocsEnum docs = null;
Term term = new Term(fieldName, word);
try {
int baseDocId;
for (int i = 0; i < reader.length; i++) {
docs = reader[i].termDocsEnum(term);
baseDocId = contexts[i].docBase;
if (docs != null) {
while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
documents.add(baseDocId + docs.docID());
}
}
}
} catch (IOException e) {
LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
}
}
示例8: determineCounts
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Computes subset counts for each segmentation definition. First gathers the
 * document sets of every distinct word across all word sets in a single
 * corpus pass, then derives the per-definition counts from those sets.
 *
 * @param wordsets    one word array per definition
 * @param definitions segmentations to evaluate (parallel to wordsets)
 * @return one CountedSubsets per definition
 */
public CountedSubsets[] determineCounts(String[][] wordsets,
        SegmentationDefinition[] definitions) {
    ObjectObjectOpenHashMap<String, IntOpenHashSet> wordDocMapping = new ObjectObjectOpenHashMap<String, IntOpenHashSet>();
    // Register every distinct word once with an (initially empty) doc-id set.
    for (String[] wordset : wordsets) {
        for (String word : wordset) {
            if (!wordDocMapping.containsKey(word)) {
                wordDocMapping.put(word, new IntOpenHashSet());
            }
        }
    }
    // Single pass over the corpus fills all document sets at once.
    corpusAdapter.getDocumentsWithWordsAsSet(wordDocMapping);
    CountedSubsets[] countedSubsets = new CountedSubsets[definitions.length];
    for (int i = 0; i < definitions.length; ++i) {
        countedSubsets[i] = new CountedSubsets(definitions[i].segments,
                definitions[i].conditions,
                createCounts(createBitSets(wordDocMapping, wordsets[i]),
                        definitions[i].neededCounts));
    }
    return countedSubsets;
}
示例9: createBitSets
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Converts each hash set into a BitSet indexed by the position of each key
 * within {@code mergedHashSet}'s backing table, so all resulting bit sets
 * share a consistent bit numbering.
 *
 * @param hashSets      the sets to convert
 * @param mergedHashSet union of all keys; defines the bit positions
 * @return one BitSet per input set, each sized to the merged set
 */
private BitSet[] createBitSets(IntOpenHashSet hashSets[],
        IntOpenHashSet mergedHashSet) {
    BitSet[] bitSets = new BitSet[hashSets.length];
    for (int i = 0; i < bitSets.length; ++i) {
        bitSets[i] = new BitSet(mergedHashSet.size());
    }
    // Iterate the merged set's open-addressed table directly: only slots with
    // the allocated flag hold a live key. 'pos' numbers the live keys in
    // table order, giving every bit set the same position for the same key.
    int pos = 0;
    for (int slot = 0; slot < mergedHashSet.keys.length; ++slot) {
        if (!mergedHashSet.allocated[slot]) {
            continue;
        }
        int key = mergedHashSet.keys[slot];
        for (int j = 0; j < bitSets.length; ++j) {
            if (hashSets[j].contains(key)) {
                bitSets[j].set(pos);
            }
        }
        ++pos;
    }
    return bitSets;
}
示例10: FieldValueCollapse
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Sets up field-value collapsing state. Any document listed in
 * {@code boostDocs} is pre-marked in the collapsed set so it is always kept.
 *
 * @param maxDoc      size of the index; dimensions the collapsed bit set
 * @param field       the field whose values drive the collapse
 * @param nullPolicy  how documents without a field value are handled
 * @param max         true to keep the max value per group, false for min
 * @param needsScores whether scores must be tracked during collection
 * @param boostDocs   doc ids to always retain; may be null
 */
public FieldValueCollapse(int maxDoc,
                          String field,
                          int nullPolicy,
                          boolean max,
                          boolean needsScores,
                          IntOpenHashSet boostDocs) {
    this.field = field;
    this.nullPolicy = nullPolicy;
    this.max = max;
    this.needsScores = needsScores;
    this.collapsedSet = new FixedBitSet(maxDoc);
    this.boostDocs = boostDocs;
    if (this.boostDocs != null) {
        // Boosted documents are unconditionally part of the collapsed set.
        for (Iterator<IntCursor> it = boostDocs.iterator(); it.hasNext(); ) {
            this.collapsedSet.set(it.next().value);
        }
    }
}
示例11: initCentroids
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Initializes K centroids from K distinct, randomly chosen unit vectors.
 * Each chosen unit's active indices become 1-entries of its centroid, and
 * the centroid's scale factor is sqrt(|unit|). A fixed seed (RAND_SEED)
 * makes the selection deterministic across runs.
 *
 * @throws IllegalStateException if fewer than K units exist (K > N), which
 *         would otherwise make the distinct-sampling loop spin forever
 */
private void initCentroids()
{
    // Bug fix: with K > N the rejection-sampling loop below can never
    // collect K distinct values in [0, N) and would hang; fail fast instead.
    if (K > N)
        throw new IllegalStateException("Cannot choose "+K+" distinct centroids from "+N+" units.");
    IntOpenHashSet set = new IntOpenHashSet();
    Random rand = new Random(RAND_SEED);
    d_centroid = new double[K*D];
    d_scala = new double[K];
    // Rejection sampling: keep drawing until K distinct unit indices are chosen.
    while (set.size() < K)
        set.add(rand.nextInt(N));
    int[] unit;
    int k = 0;
    for (IntCursor cur : set)
    {
        unit = v_units.get(cur.value);
        // Mark each active index of the unit in centroid k.
        for (int index : unit)
            d_centroid[getCentroidIndex(k, index)] = 1;
        d_scala[k++] = Math.sqrt(unit.length);
    }
}
示例12: addEdge
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Adds a directed edge {@code from -> to}, growing the adjacency list with
 * null padding when needed and invalidating any previously built graph.
 *
 * @param from source vertex id (non-negative)
 * @param to   target vertex id (non-negative)
 * @throws IllegalArgumentException if either endpoint is negative
 */
public void addEdge(int from, int to) {
    if (from < 0 || to < 0) {
        throw new IllegalArgumentException();
    }
    sz = Math.max(sz, Math.max(from, to) + 1);
    builtGraph = null; // any cached build is now stale
    int slots = addedEdges.size();
    if (slots == from) {
        // 'from' is exactly the next free slot: append directly.
        addedEdges.add(IntOpenHashSet.from(to));
    } else if (slots < from) {
        // Pad the gap with nulls, then append the new adjacency set.
        addedEdges.addAll(Collections.nCopies(from - slots, null));
        addedEdges.add(IntOpenHashSet.from(to));
    } else {
        IntSet existing = addedEdges.get(from);
        if (existing == null) {
            addedEdges.set(from, IntOpenHashSet.from(to));
        } else {
            existing.add(to);
        }
    }
}
示例13: propagatePhiUsageInformation
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Transitively marks as used every phi reachable through incoming values
 * from a phi whose receiver is already used. Breadth-first worklist over
 * variable (receiver) indices; 'visited' prevents reprocessing.
 */
private void propagatePhiUsageInformation() {
    IntDeque worklist = new IntArrayDeque();
    // Seed with all phi receivers already flagged as used.
    for (int receiverIndex : phisByReceiver.keys().toArray()) {
        if (usedPhis.get(receiverIndex)) {
            worklist.addLast(receiverIndex);
        }
    }
    IntSet visited = new IntOpenHashSet();
    while (!worklist.isEmpty()) {
        int varIndex = worklist.removeFirst();
        if (!visited.add(varIndex)) {
            continue; // already propagated through this variable
        }
        usedPhis.set(varIndex);
        Phi phi = phisByReceiver.get(varIndex);
        if (phi == null) {
            continue; // not a phi receiver; nothing to follow
        }
        for (Incoming incoming : phi.getIncomings()) {
            int sourceIndex = incoming.getValue().getIndex();
            if (!visited.contains(sourceIndex)) {
                worklist.addLast(sourceIndex);
            }
        }
    }
}
示例14: getSmallestNonProjectiveArc
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Called by {@link DEPTree#projectivize()}. Scans the candidate ids,
 * removes those whose arcs are already projective, and returns the node
 * whose arc has the highest non-projectivity measure (null if none).
 *
 * @param ids candidate node ids; projective ids are removed in place
 * @return the most non-projective node, or null if all were projective
 */
private DEPNode getSmallestNonProjectiveArc(IntArrayList ids)
{
    IntOpenHashSet projectiveIds = new IntOpenHashSet();
    DEPNode worst = null;
    int maxScore = 0;
    for (IntCursor cur : ids)
    {
        DEPNode node = get(cur.value);
        int score = isNonProjective(node);
        if (score == 0)
        {
            // Arc is projective; schedule this id for removal.
            projectiveIds.add(cur.value);
        }
        else if (score > maxScore)
        {
            worst = node;
            maxScore = score;
        }
    }
    ids.removeAll(projectiveIds);
    return worst;
}
示例15: getDEPTreeWithoutEdited
import com.carrotsearch.hppc.IntOpenHashSet; //导入依赖的package包/类
/**
 * Builds a copy of {@code dTree} with every edited token (as determined from
 * {@code cTree}) removed, renumbering the surviving nodes consecutively from 1
 * and pruning edited heads from their dependency links.
 *
 * @param cTree constituent tree identifying the edited tokens
 * @param dTree dependency tree to filter (its surviving nodes are reused)
 * @return the filtered tree, or null if only the artificial root remains
 */
public DEPTree getDEPTreeWithoutEdited(CTTree cTree, DEPTree dTree)
{
    IntOpenHashSet editedIds = new IntOpenHashSet();
    addEditedTokensAux(cTree.getRoot(), editedIds);
    DEPTree pruned = new DEPTree();
    int size = dTree.size();
    int nextId = 1;
    // Index 0 is the artificial root, hence the scan starts at 1.
    for (int i = 1; i < size; i++)
    {
        if (editedIds.contains(i))
            continue;
        DEPNode node = dTree.get(i);
        node.id = nextId++;
        removeEditedHeads(node.getXHeads(), editedIds);
        removeEditedHeads(node.getSHeads(), editedIds);
        pruned.add(node);
    }
    // Size 1 means only the root survived: treat as "everything was edited".
    return (pruned.size() == 1) ? null : pruned;
}