本文整理汇总了Java中com.carrotsearch.hppc.IntIntOpenHashMap.put方法的典型用法代码示例。如果您正苦于以下问题:Java IntIntOpenHashMap.put方法的具体用法?Java IntIntOpenHashMap.put怎么用?Java IntIntOpenHashMap.put使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.carrotsearch.hppc.IntIntOpenHashMap
的用法示例。
在下文中一共展示了IntIntOpenHashMap.put方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: ProjectPushInfo
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Builds project-push-down info: records the desired fields' names and types
 * (in field order) and creates an {@link InputRewriter} that maps each field's
 * original index to its compacted index in the projected schema.
 */
public ProjectPushInfo(List<SchemaPath> columns, ImmutableList<DesiredField> desiredFields) {
    super();
    this.columns = columns;
    this.desiredFields = desiredFields;

    int fieldCount = desiredFields.size();
    this.fieldNames = Lists.newArrayListWithCapacity(fieldCount);
    this.types = Lists.newArrayListWithCapacity(fieldCount);

    // old field index -> position in the compacted projection
    IntIntOpenHashMap indexMapping = new IntIntOpenHashMap();
    int newIndex = 0;
    for (DesiredField field : desiredFields) {
        fieldNames.add(field.name);
        types.add(field.field.getType());
        indexMapping.put(field.origIndex, newIndex++);
    }
    this.rewriter = new InputRewriter(indexMapping);
}
示例2: removeFromVariableCaches
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Removes {@code victim} from the cached variable-index structures, rebuilding
 * the global-index -> local-index map ({@code projectionMap}) and the
 * local-index -> global-index array ({@code my2global}) over the survivors.
 *
 * BUG FIX (review): the local index {@code vi} was never incremented, so every
 * surviving variable was mapped to local index 0 and only {@code my2global[0]}
 * was ever written, leaving the rest of the array as zeros.
 */
private void removeFromVariableCaches (Variable victim)
{
    Set survivors = new HashSet(variablesSet ());
    survivors.remove (victim);

    int vi = 0;
    IntIntOpenHashMap dict = new IntIntOpenHashMap (survivors.size ());
    // dict.setDefaultValue (-1); No longer supported, but this.getIndex() written to avoid need for this.
    my2global = new int[survivors.size ()];

    for (Iterator it = survivors.iterator (); it.hasNext();) {
        Variable var = (Variable) it.next ();
        int gvi = var.getIndex ();
        dict.put (gvi, vi);
        my2global [vi] = gvi;
        vi++; // advance the local index for the next survivor
    }

    projectionMap = dict;
    numNodes--;  // do this at end b/c it affects getVertexSet()
}
示例3: remapGapIndices
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/** Called by {@link CTReader#normalizeIndices(CTTree)}. */
static private void remapGapIndices(IntIntOpenHashMap map, int[] lastIndex, CTNode curr)
{
int gapIndex = curr.gapIndex;
if (map.containsKey(gapIndex))
{
curr.gapIndex = map.get(gapIndex);
}
else if (gapIndex != -1)
{
curr.gapIndex = lastIndex[0];
map.put(gapIndex, lastIndex[0]++);
}
for (CTNode child : curr.ls_children)
remapGapIndices(map, lastIndex, child);
}
示例4: fordFulkerson
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Computes a bipartite matching over the graph's edges via augmenting paths.
 * Edge sources form the "white" side, edge targets the "black" side; every
 * black vertex starts unmatched (partner -1). The returned set contains one
 * {@code Edge(partner, black)} per entry of the match table — including
 * still-unmatched blacks, which appear as {@code Edge(-1, black)} exactly as
 * in the original implementation.
 */
public static ObjectOpenHashSet<Edge> fordFulkerson(DirectedGraph graph) {
    ObjectOpenHashSet<Edge> edges = new ObjectOpenHashSet<>(graph.getEdges());

    IntOpenHashSet whiteSet = new IntOpenHashSet();
    IntOpenHashSet blackSet = new IntOpenHashSet();
    IntIntOpenHashMap match = new IntIntOpenHashMap();
    for (ObjectCursor<Edge> edge : edges) {
        whiteSet.add(edge.value.from);
        blackSet.add(edge.value.to);
        match.put(edge.value.to, -1); // -1 == unmatched
    }

    // Try to extend the matching from every white vertex in turn.
    for (IntCursor white : whiteSet) {
        IntOpenHashSet visitedBlack = new IntOpenHashSet();
        aug(edges, white.value, visitedBlack, match, blackSet);
    }

    // Materialize the match table as (partner -> black) edges.
    ObjectOpenHashSet<Edge> result = new ObjectOpenHashSet<>();
    for (IntIntCursor entry : match) {
        result.add(new Edge(entry.value, entry.key));
    }
    return result;
}
示例5: aug
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Tries to find an augmenting path starting from {@code white}: for each
 * adjacent, not-yet-visited black vertex, matches it to {@code white} if it
 * is free, or if its current partner can be re-matched elsewhere.
 *
 * Improvements (review): the edge scan now breaks as soon as the
 * white->black edge is found instead of scanning all remaining edges, and
 * the unmatched sentinel is compared as {@code -1} (int) rather than the
 * mismatched {@code -1L} long literal.
 *
 * @return true if the matching was successfully augmented
 */
private static boolean aug(ObjectOpenHashSet<Edge> edges, int white,
        IntOpenHashSet visitedBlack, IntIntOpenHashMap match,
        IntOpenHashSet blackSet) {
    for (IntCursor blackcur : blackSet) {
        final int black = blackcur.value;

        // Is there an edge white -> black?
        boolean contains = false;
        for (ObjectCursor<Edge> ecur : edges) {
            if ((white == ecur.value.from) && (black == ecur.value.to)) {
                contains = true;
                break; // no need to scan the remaining edges
            }
        }

        if (contains && !visitedBlack.contains(black)) {
            visitedBlack.add(black);
            // black is free (-1), or its current partner can move elsewhere
            if (match.get(black) == -1
                    || aug(edges, match.get(black), visitedBlack, match,
                            blackSet)) {
                match.put(black, white);
                return true;
            }
        }
    }
    return false;
}
示例6: merge
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Merges spans that are enclosed by other spans into their enclosing span.
 * The spans are sorted with this object's comparator, each span is checked
 * against later (larger) spans for enclosure, and every enclosed span is
 * folded into its enclosing span via {@code merge(Span, Span)}; spans that
 * enclose nothing smaller and are themselves not enclosed are kept as-is.
 *
 * @param spans the spans to merge
 * @return the input list unchanged if no span is enclosed by another,
 *         otherwise a new list of the merged/kept spans
 */
@SuppressWarnings("unchecked")
protected List<T> merge(List<T> spans) {
// Sort using this class as the comparator (assumed smallest-first ordering
// such that enclosing spans sort after enclosed ones — TODO confirm).
Span spanArray[] = spans.toArray(new Span[spans.size()]);
Arrays.sort(spanArray, this);
// Maps index of an enclosed span -> index of (one of) its enclosing spans.
IntIntOpenHashMap enclosedByMap = new IntIntOpenHashMap();
boolean isEnclosed;
for (int i = 0; i < spanArray.length; ++i) {
isEnclosed = false;
// Scan from the end; stop at the first (largest-index) enclosing span.
for (int j = spanArray.length - 1; (j > i) && (!isEnclosed); --j) {
// if spanArray[i] is enclosed by spanArray[j]
if ((spanArray[i].getStartPosition() >= spanArray[j].getStartPosition())
&& ((spanArray[i].getStartPosition() + spanArray[i].getLength()) <= (spanArray[j]
.getStartPosition() + spanArray[j].getLength()))) {
enclosedByMap.put(i, j);
isEnclosed = true;
}
}
}
// if no match could be found
if (enclosedByMap.size() == 0) {
return spans;
}
List<T> mergedMarkings = new ArrayList<T>(spans.size());
// starting with the smallest span, check if a span is enclosed by
// another
int largerSpanId;
for (int i = 0; i < spanArray.length; ++i) {
if (enclosedByMap.containsKey(i)) {
// hppc idiom: lget() returns the value located by the containsKey(i)
// call just above — do not reorder these two calls.
largerSpanId = enclosedByMap.lget();
// Fold the enclosed span into its enclosing span in place, so later
// merges into the same enclosing span accumulate.
spanArray[largerSpanId] = merge(spanArray[i], spanArray[largerSpanId]);
} else {
mergedMarkings.add((T) spanArray[i]);
}
}
return mergedMarkings;
}
示例7: requestDocumentsWithWord
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Collects, for the given word, the token positions at which it occurs in
 * every document of the (multi-segment) index, and records the length of each
 * matching document.
 *
 * Improvement (review): {@code docPosEnum.freq()} is constant for the current
 * document, so it is hoisted out of the position loop instead of being
 * re-queried on every iteration of the loop condition.
 *
 * @param word the word to look up
 * @param positionsInDocs global doc id -> per-word position lists; slot
 *        {@code wordId} of each array is filled by this call
 * @param docLengths global doc id -> document length (read from the stored
 *        field {@code docLengthFieldName})
 * @param wordId index of this word inside the per-document position arrays
 * @param numberOfWords total number of words (size of new position arrays)
 */
protected void requestDocumentsWithWord(String word, IntObjectOpenHashMap<IntArrayList[]> positionsInDocs,
        IntIntOpenHashMap docLengths, int wordId, int numberOfWords) {
    DocsAndPositionsEnum docPosEnum = null;
    Term term = new Term(fieldName, word);
    int localDocId, globalDocId, baseDocId;
    IntArrayList positions[];
    try {
        for (int i = 0; i < reader.length; i++) {
            docPosEnum = reader[i].termPositionsEnum(term);
            // Segment-local doc ids are offset by the segment's doc base.
            baseDocId = contexts[i].docBase;
            if (docPosEnum != null) {
                while (docPosEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    localDocId = docPosEnum.docID();
                    globalDocId = localDocId + baseDocId;
                    // if this is the first word and we found a new document
                    if (!positionsInDocs.containsKey(globalDocId)) {
                        positions = new IntArrayList[numberOfWords];
                        positionsInDocs.put(globalDocId, positions);
                    } else {
                        positions = positionsInDocs.get(globalDocId);
                    }
                    if (positions[wordId] == null) {
                        positions[wordId] = new IntArrayList();
                    }
                    // Go through the positions inside this document.
                    final int freq = docPosEnum.freq();
                    for (int p = 0; p < freq; ++p) {
                        positions[wordId].add(docPosEnum.nextPosition());
                    }
                    if (!docLengths.containsKey(globalDocId)) {
                        // Get the length of the document
                        docLengths.put(globalDocId, reader[i].document(localDocId).getField(docLengthFieldName)
                                .numericValue().intValue());
                    }
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}
示例8: requestWordPositionsInDocuments
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Returns the word positions of this single (in-memory) document, keyed by
 * document id 0, and records the document's length in {@code docLengths}.
 *
 * Robustness/consistency fix (review): guards against {@code positions[i]}
 * being {@code null} (as the sibling implementation of this method does);
 * previously {@code positions[i].length} would throw a NullPointerException.
 *
 * @param words the requested words (unused here; positions are precomputed)
 * @param docLengths out-parameter receiving doc id 0 -> document length
 * @return doc id 0 -> per-word position lists (null slot = word absent)
 */
@Override
public IntObjectOpenHashMap<IntArrayList[]> requestWordPositionsInDocuments(String[] words,
        IntIntOpenHashMap docLengths) {
    IntObjectOpenHashMap<IntArrayList[]> positionsInDocuments = new IntObjectOpenHashMap<IntArrayList[]>();
    IntArrayList[] positionsInDocument = new IntArrayList[positions.length];
    for (int i = 0; i < positionsInDocument.length; ++i) {
        if ((positions[i] != null) && (positions[i].length > 0)) {
            positionsInDocument[i] = new IntArrayList();
            positionsInDocument[i].add(positions[i]);
        }
    }
    positionsInDocuments.put(0, positionsInDocument);
    docLengths.put(0, docLength);
    return positionsInDocuments;
}
示例9: requestWordPositionsInDocuments
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Returns the word positions of this single (in-memory) document, keyed by
 * document id 0, and records the document's length in {@code docLengths}.
 * Slots for words with no positions (null or empty) are left {@code null}.
 */
@Override
public IntObjectOpenHashMap<IntArrayList[]> requestWordPositionsInDocuments(String[] words,
        IntIntOpenHashMap docLengths) {
    int wordCount = positions.length;
    IntArrayList[] docPositions = new IntArrayList[wordCount];
    for (int w = 0; w < wordCount; ++w) {
        if ((positions[w] == null) || (positions[w].length == 0)) {
            continue; // word never occurs: leave the slot null
        }
        IntArrayList list = new IntArrayList();
        list.add(positions[w]);
        docPositions[w] = list;
    }
    IntObjectOpenHashMap<IntArrayList[]> result = new IntObjectOpenHashMap<IntArrayList[]>();
    result.put(0, docPositions);
    docLengths.put(0, docLength);
    return result;
}
示例10: mdsAlg
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Runs the dominating-set algorithm (MDS presumably = minimum dominating set,
 * judging by {@link AbstractMDSResult} — confirm against the class docs) on
 * the given graph and returns the selected vertex set S.
 *
 * Side effects: (re)initializes many instance fields (W, G, neig*, S,
 * counters, timing) — this method is not safe for concurrent reuse.
 */
@Override
public AbstractMDSResult mdsAlg(Graph g) {
// CPU-time-based measurement of preparation and total run time.
ThreadMXBean bean = ManagementFactory.getThreadMXBean();
long start = bean.getCurrentThreadCpuTime();
// W = white (not yet dominated) vertices; G = all vertices still in the graph.
W = new IntOpenHashSet(g.getVertices());
G = new IntOpenHashSet(g.getVertices());
// Pre-size hash containers above the default load factor threshold
// (1/0.65 scaling) to avoid rehashing.
initialSize = (int) Math.ceil(g.getNumberOfVertices() * (1 / 0.65)) + 1;
neigNonG = new IntObjectOpenHashMap<>(initialSize);
neigN2 = new IntObjectOpenHashMap<>(initialSize);
howManyWhiteCanKeySee = new IntIntOpenHashMap(initialSize);
keyGrantsFlowerToValues = new IntObjectOpenHashMap<>(initialSize);
keyHasGrantedFlowerByValues = new IntObjectOpenHashMap<>(initialSize);
// Per-vertex caches: 1-hop neighborhood (two copies), 2-hop neighborhood,
// count of visible white vertices, and empty "flower" grant bookkeeping.
for (IntCursor vcur : W) {
IntOpenHashSet n1 = g.getN1(vcur.value);
neig.put(vcur.value, new IntOpenHashSet(n1));
neigNonG.put(vcur.value, new IntOpenHashSet(n1));
neigN2.put(vcur.value, new IntOpenHashSet(g.getN2(vcur.value)));
howManyWhiteCanKeySee.put(vcur.value, n1.size());
keyGrantsFlowerToValues.put(vcur.value, new IntOpenHashSet());
keyHasGrantedFlowerByValues.put(vcur.value, new IntOpenHashSet());
}
// S accumulates the chosen dominating vertices.
S = new IntOpenHashSet(initialSize);
iterations = 0;
prepTime = bean.getCurrentThreadCpuTime() - start;
// Main phases of the algorithm (semantics defined elsewhere in the class).
markFlowers();
cleanDefiniteFlowers();
cleanPotentialFlowers();
skipped = 0;
cleanNonFlowers();
runTime = bean.getCurrentThreadCpuTime() - start;
System.out.println("Number of iterations: " + iterations);
System.out.println("Skipped: " + skipped);
MDSResultBackedByIntOpenHashSet result = new MDSResultBackedByIntOpenHashSet();
result.setResult(S);
return result;
}
示例11: checkMonotonicity
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
* Throws an exception, if the hierarchy is not monotonic.
*
* TODO: This is a potentially expensive check that should be done when loading the hierarchy
*
* @param manager
*/
public void checkMonotonicity(DataManager manager) {
// Obtain dictionary
String[] dictionary = null;
String[] header = manager.getDataGeneralized().getHeader();
for (int i=0; i<header.length; i++) {
if (header[i].equals(attribute)) {
dictionary = manager.getDataGeneralized().getDictionary().getMapping()[i];
}
}
// Check
if (dictionary==null) {
throw new IllegalStateException("Cannot obtain dictionary for attribute ("+attribute+")");
}
// Level value -> level+1 value
final IntIntOpenHashMap hMap = new IntIntOpenHashMap();
// Input->level->output.
for (int level = 0; level < (map[0].length - 1); level++) {
hMap.clear();
for (int i = 0; i < map.length; i++) {
final int outputCurrentLevel = map[i][level];
final int outputNextLevel = map[i][level + 1];
if (hMap.containsKey(outputCurrentLevel)) {
final int compare = hMap.get(outputCurrentLevel);
if (compare != outputNextLevel) {
String in = dictionary[outputCurrentLevel];
String out1 = dictionary[compare];
String out2 = dictionary[outputNextLevel];
throw new IllegalArgumentException("The transformation rule for the attribute '" + attribute + "' is not a hierarchy. ("+in+") can either be transformed to ("+out1+") or to ("+out2+")");
}
} else {
hMap.put(outputCurrentLevel, outputNextLevel);
}
}
}
}
示例12: initialize
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Initializes the sampler state: allocates the state/topic count arrays,
 * seeds the cached per-state topic distributions, and draws an initial state
 * assignment for every document.
 */
public void initialize () {
// Lazily create the RNG so a caller-supplied one is kept if present.
if (random == null) {
random = new Randoms();
}
gammaSum = gamma * numStates;
// Count matrices for the state/topic HMM-style model.
stateTopicCounts = new int[numStates][numTopics];
stateTopicTotals = new int[numStates];
stateStateTransitions = new int[numStates][numStates];
stateTransitionTotals = new int[numStates];
pi = 1000.0;
sumPi = numStates * pi;
// NOTE(review): maxTokens, totalTokens, sequenceID and currentSequenceID are
// declared but not used within this method as shown — confirm whether they
// are dead locals or used in a fuller version of this method.
int maxTokens = 0;
int totalTokens = 0;
numSequences = 0;
int sequenceID;
int currentSequenceID = -1;
// The code to cache topic distributions
// takes an int-int hashmap as a mask to only update
// the distributions for topics that have actually changed.
// Here we create a dummy count hash that has all the topics.
IntIntOpenHashMap allTopicsDummy = new IntIntOpenHashMap();
for (int topic = 0; topic < numTopics; topic++) {
allTopicsDummy.put(topic, 1);
}
// Force a full recache for every state using the all-topics mask.
for (int state=0; state < numStates; state++) {
recacheStateTopicDistribution(state, allTopicsDummy);
}
// Draw an initial state for each document (true = initialization mode).
for (int doc = 0; doc < numDocs; doc++) {
sampleState(doc, random, true);
}
}
示例13: trimFeatures
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Removes features whose weights are all within +/- {@code threshold} across
 * every label, compacting the weight vector and remapping the string-keyed
 * feature indices accordingly. Feature index 0 (the bias) is always kept.
 *
 * @param log       progress is reported here
 * @param threshold a feature is kept only if at least one of its per-label
 *                  weights exceeds this magnitude
 */
public void trimFeatures(Logger log, float threshold)
{
FloatArrayList tWeights = new FloatArrayList(f_weights.size());
// Maps old feature index -> new (compacted) feature index; absent = trimmed.
IntIntOpenHashMap map = new IntIntOpenHashMap();
ObjectIntHashMap<String> m;
int i, j, tFeatures = 1;
boolean trim;
String s;
log.info("Trimming: ");
// bias (feature 0): always copied, never trimmed
for (j=0; j<n_labels; j++)
tWeights.add(f_weights.get(j));
// rest: weights are stored label-major, feature i occupies
// [i*n_labels, (i+1)*n_labels)
for (i=1; i<n_features; i++)
{
trim = true;
for (j=0; j<n_labels; j++)
{
if (Math.abs(f_weights.get(i*n_labels+j)) > threshold)
{
trim = false;
break;
}
}
if (!trim)
{
// Keep the feature: assign its new index and copy its weight block.
map.put(i, tFeatures++);
for (j=0; j<n_labels; j++)
tWeights.add(f_weights.get(i*n_labels+j));
}
}
log.info(String.format("%d -> %d\n", n_features, tFeatures));
tWeights.trimToSize();
// map: rewrite every string feature's index; drop trimmed features
// (map.get returns 0 for absent keys, and 0 is never a valid new index).
for (String type : Lists.newArrayList(m_features.keySet()))
{
m = m_features.get(type);
for (ObjectIntPair<String> p : m.toList())
{
i = map.get(p.i);
s = (String)p.o;
if (i > 0) m.put(s, i);
else m.remove(s);
}
// Drop feature types that lost all of their features.
if (m.isEmpty())
m_features.remove(type);
}
f_weights = tWeights;
n_features = tFeatures;
}
示例14: normalizeIndices
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
* Normalizes co-indices and gap-indices of the specific tree.
* @param tree the tree to be normalized.
*/
static public void normalizeIndices(CTTree tree)
{
// retrieve all co-indexes
IntObjectOpenHashMap<List<CTNode>> mOrg = new IntObjectOpenHashMap<List<CTNode>>();
getCoIndexMap(tree.getRoot(), mOrg);
if (mOrg.isEmpty()) return;
int[] keys = mOrg.keys().toArray();
Arrays.sort(keys);
IntIntOpenHashMap mNew = new IntIntOpenHashMap();
int coIndex = 1, last, i;
List<CTNode> list;
CTNode curr, ec;
boolean isAnteFound;
for (int key : keys)
{
list = mOrg.get(key);
last = list.size() - 1;
isAnteFound = false;
for (i=last; i>=0; i--)
{
curr = list.get(i);
if (curr.isEmptyCategoryRec())
{
ec = curr.getSubTerminals().get(0);
if (i == last || isAnteFound || CTLibEn.RE_ICH_PPA_RNR.matcher(ec.form).find() || CTLibEn.containsCoordination(curr.getLowestCommonAncestor(list.get(i+1))))
curr.coIndex = -1;
else
curr.coIndex = coIndex++;
if (isAnteFound || i > 0)
ec.form += "-"+coIndex;
}
else if (isAnteFound)
{
curr.coIndex = -1;
}
else
{
curr.coIndex = coIndex;
mNew.put(key, coIndex);
isAnteFound = true;
}
}
coIndex++;
}
int[] lastIndex = {coIndex};
remapGapIndices(mNew, lastIndex, tree.getRoot());
}
示例15: mdsAlg
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Runs the dominating-set algorithm (MDS presumably = minimum dominating set,
 * judging by {@link AbstractMDSResult} — confirm against the class docs) on
 * the given graph and returns the selected vertex set S. This variant also
 * precomputes 3-hop neighborhoods and a "sweep cardinality" table.
 *
 * Side effects: (re)initializes many instance fields (W, G, neig*, neigN3,
 * sweepCardinality, S, counters, timing) — not safe for concurrent reuse.
 */
@Override
public AbstractMDSResult mdsAlg(Graph g) {
// CPU-time-based measurement of preparation and total run time.
ThreadMXBean bean = ManagementFactory.getThreadMXBean();
long start = bean.getCurrentThreadCpuTime();
// W = white (not yet dominated) vertices; G = all vertices still in the graph.
W = new IntOpenHashSet(g.getVertices());
G = new IntOpenHashSet(g.getVertices());
// Pre-size hash containers above the default load factor threshold
// (1/0.65 scaling) to avoid rehashing.
initialSize = (int) Math.ceil(g.getNumberOfVertices() * (1 / 0.65)) + 1;
neigNonG = new IntObjectOpenHashMap<>(initialSize);
neigN2 = new IntObjectOpenHashMap<>(initialSize);
howManyWhiteCanKeySee = new IntIntOpenHashMap(initialSize);
keyGrantsFlowerToValues = new IntObjectOpenHashMap<>(initialSize);
keyHasGrantedFlowerByValues = new IntObjectOpenHashMap<>(initialSize);
// Per-vertex caches: 1-hop neighborhood (two copies), 2-hop neighborhood,
// count of visible white vertices, and empty "flower" grant bookkeeping.
for (IntCursor vcur : W) {
IntOpenHashSet n1 = g.getN1(vcur.value);
neig.put(vcur.value, new IntOpenHashSet(n1));
neigNonG.put(vcur.value, new IntOpenHashSet(n1));
neigN2.put(vcur.value, new IntOpenHashSet(g.getN2(vcur.value)));
howManyWhiteCanKeySee.put(vcur.value, n1.size());
keyGrantsFlowerToValues.put(vcur.value, new IntOpenHashSet());
keyHasGrantedFlowerByValues.put(vcur.value, new IntOpenHashSet());
}
// Build the 3-hop neighborhood of each vertex by expanding the cached
// 1-hop sets three levels deep, deduplicating via neig3set membership.
for (IntCursor wcur : W) {
IntOpenHashSet neig3set = new IntOpenHashSet();
for (IntCursor n1cur : neig.get(wcur.value)) {
if (neig3set.contains(n1cur.value)) {
continue;
}
neig3set.add(n1cur.value);
for (IntCursor n2cur : neig.get(n1cur.value)) {
if (neig3set.contains(n2cur.value)) {
continue;
}
neig3set.add(n2cur.value);
for (IntCursor n3cur : neig.get(n2cur.value)) {
neig3set.add(n3cur.value);
// for (IntCursor n4cur : neig.get(n3cur.value)) {
// verticesToRecompute.add(n4cur.value);
// }
}
}
}
neigN3.put(wcur.value, neig3set);
}
// Sweep-cardinality table for all (still white) vertices.
sweepCardinality = new IntIntOpenHashMap(initialSize);
recomputeSweepCardinalityFor(W);
// S accumulates the chosen dominating vertices.
S = new IntOpenHashSet(initialSize);
iterations = 0;
prepTime = bean.getCurrentThreadCpuTime() - start;
// Main phases of the algorithm (semantics defined elsewhere in the class).
markFlowers();
cleanDefiniteFlowers();
cleanPotentialFlowers();
skipped = 0;
cleanNonFlowers();
runTime = bean.getCurrentThreadCpuTime() - start;
System.out.println("Number of iterations: " + iterations);
System.out.println("Skipped: " + skipped);
MDSResultBackedByIntOpenHashSet result = new MDSResultBackedByIntOpenHashSet();
result.setResult(S);
return result;
}