This article collects typical usage examples of the Java method com.carrotsearch.hppc.IntIntOpenHashMap.get. If you are wondering how IntIntOpenHashMap.get works, how to call it, or what real-world uses of IntIntOpenHashMap.get look like, the curated method examples below may help. You can also explore further usage examples of its containing class, com.carrotsearch.hppc.IntIntOpenHashMap.
The following presents 11 code examples of the IntIntOpenHashMap.get method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
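Before the project examples, a minimal self-contained sketch (not taken from any of the projects below) may help illustrate the basic contract of IntIntOpenHashMap.get: for a key that is not present it returns the map's default value (0), which is why several examples below pair it with containsKey whenever "absent" and "mapped to 0" must be distinguished.

import com.carrotsearch.hppc.IntIntOpenHashMap;

public class GetBasics {
    public static void main(String[] args) {
        IntIntOpenHashMap map = new IntIntOpenHashMap();
        map.put(7, 42);
        System.out.println(map.get(7));          // 42
        System.out.println(map.get(99));         // 0, the default value for absent keys
        System.out.println(map.containsKey(99)); // false, so the 0 above means "absent"
    }
}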
Example 1: remapGapIndices
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
/** Called by {@link CTReader#normalizeIndices(CTTree)}. */
static private void remapGapIndices(IntIntOpenHashMap map, int[] lastIndex, CTNode curr)
{
int gapIndex = curr.gapIndex;
if (map.containsKey(gapIndex))
{
curr.gapIndex = map.get(gapIndex);
}
else if (gapIndex != -1)
{
curr.gapIndex = lastIndex[0];
map.put(gapIndex, lastIndex[0]++);
}
for (CTNode child : curr.ls_children)
remapGapIndices(map, lastIndex, child);
}
Example 2: aug
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
private static boolean aug(ObjectOpenHashSet<Edge> edges, int white,
IntOpenHashSet visitedBlack, IntIntOpenHashMap match,
IntOpenHashSet blackSet) {
for (IntCursor blackcur : blackSet) {
boolean contains = false;
final int black = blackcur.value;
for (ObjectCursor<Edge> ecur : edges) {
if ((white == ecur.value.from)
&& (black == ecur.value.to)) {
contains = true;
}
}
if (contains && !visitedBlack.contains(black)) {
visitedBlack.add(black);
if (match.get(black) == -1
|| aug(edges, match.get(black), visitedBlack, match,
blackSet)) {
match.put(black, white);
return true;
}
}
}
return false;
}
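For context, a hypothetical driver (not part of the original class) showing how aug is typically used to build a maximum bipartite matching: every black vertex is first mapped to -1, because the search above treats match.get(black) == -1 as "unmatched". The whiteSet below is an assumed companion to blackSet.

IntIntOpenHashMap match = new IntIntOpenHashMap();
for (IntCursor black : blackSet) {
    match.put(black.value, -1);          // -1 marks an unmatched black vertex
}
int matchingSize = 0;
for (IntCursor white : whiteSet) {
    IntOpenHashSet visitedBlack = new IntOpenHashSet();
    if (aug(edges, white.value, visitedBlack, match, blackSet)) {
        matchingSize++;                  // an augmenting path was found for this white vertex
    }
}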
Example 3: setRawEdges
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
public void setRawEdges(int[] edgesFrom, int[] edgesTo, int edgesCount) {
this.edgesCount = edgesCount;
IntIntOpenHashMap inputVerticesMap = new IntIntOpenHashMap(
edgesCount >> 4);
int currentVertexNumber = 1;
for (int i = 0; i < edgesCount; i++) {
if (inputVerticesMap.putIfAbsent(edgesFrom[i], currentVertexNumber)) {
currentVertexNumber++;
}
if (inputVerticesMap.putIfAbsent(edgesTo[i], currentVertexNumber)) {
currentVertexNumber++;
}
}
maxVertexNumber = currentVertexNumber - 1;
this.edgesFrom = new int[edgesCount];
this.edgesTo = new int[edgesCount];
// System.out.println(Utils.largeIntArrayToString(edgesFrom));
// System.out.println(Utils.largeIntArrayToString(edgesTo));
for (int i = 0; i < edgesCount; i++) {
this.edgesFrom[i] = inputVerticesMap.get(edgesFrom[i]);
this.edgesTo[i] = inputVerticesMap.get(edgesTo[i]);
}
// System.out.println(Utils.largeIntArrayToString(this.edgesFrom));
// System.out.println(Utils.largeIntArrayToString(this.edgesTo));
areRawEdgesSet = true;
}
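The renumbering trick above can be shown on its own. This sketch is independent of the class: putIfAbsent returns true only when the key was not yet present, so each distinct raw vertex id receives the next compact number exactly once.

IntIntOpenHashMap ids = new IntIntOpenHashMap();
int next = 1;
for (int raw : new int[] {1001, 57, 1001, 300}) {
    if (ids.putIfAbsent(raw, next)) {    // true only for the first occurrence of raw
        next++;
    }
}
// ids.get(1001) == 1, ids.get(57) == 2, ids.get(300) == 3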
Example 4: fromMap
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
public static IntDistribution fromMap(IntIntOpenHashMap counts) {
double[] values = new double[counts.assigned];
int[] keys = counts.keys().toArray();
Arrays.sort(keys);
for (int j = 0; j < keys.length; ++j) {
values[j] = counts.get(keys[j]);
}
return new IntDistribution(keys, values);
}
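A hedged usage sketch for the method above, assuming (as the return type suggests) that fromMap is a static factory on IntDistribution, which is the project's own class; only the counting part relies on the HPPC API.

IntIntOpenHashMap counts = new IntIntOpenHashMap();
for (int value : new int[] {3, 1, 3, 3, 7}) {
    counts.putOrAdd(value, 1, 1);        // insert 1, or add 1 if the key is already present
}
IntDistribution dist = IntDistribution.fromMap(counts);  // keys sorted: 1, 3, 7 with counts 1, 3, 1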
Example 5: checkMonotonicity
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
/**
* Throws an exception if the hierarchy is not monotonic.
*
* TODO: This is a potentially expensive check that should be done when loading the hierarchy
*
* @param manager the data manager providing the generalized data and its dictionary
*/
public void checkMonotonicity(DataManager manager) {
// Obtain dictionary
String[] dictionary = null;
String[] header = manager.getDataGeneralized().getHeader();
for (int i=0; i<header.length; i++) {
if (header[i].equals(attribute)) {
dictionary = manager.getDataGeneralized().getDictionary().getMapping()[i];
}
}
// Check
if (dictionary==null) {
throw new IllegalStateException("Cannot obtain dictionary for attribute ("+attribute+")");
}
// Level value -> level+1 value
final IntIntOpenHashMap hMap = new IntIntOpenHashMap();
// Input->level->output.
for (int level = 0; level < (map[0].length - 1); level++) {
hMap.clear();
for (int i = 0; i < map.length; i++) {
final int outputCurrentLevel = map[i][level];
final int outputNextLevel = map[i][level + 1];
if (hMap.containsKey(outputCurrentLevel)) {
final int compare = hMap.get(outputCurrentLevel);
if (compare != outputNextLevel) {
String in = dictionary[outputCurrentLevel];
String out1 = dictionary[compare];
String out2 = dictionary[outputNextLevel];
throw new IllegalArgumentException("The transformation rule for the attribute '" + attribute + "' is not a hierarchy. ("+in+") can either be transformed to ("+out1+") or to ("+out2+")");
}
} else {
hMap.put(outputCurrentLevel, outputNextLevel);
}
}
}
}
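The core of the check is the containsKey/get/put pattern on hMap. A compressed, standalone illustration (values invented) of the same rule, that one input value must always generalize to the same output at a given level:

IntIntOpenHashMap hMap = new IntIntOpenHashMap();
int[][] rules = { {0, 10}, {1, 11}, {0, 12} };   // input 0 maps to both 10 and 12
for (int[] rule : rules) {
    if (hMap.containsKey(rule[0]) && hMap.get(rule[0]) != rule[1]) {
        throw new IllegalArgumentException("Input " + rule[0] + " maps to both "
            + hMap.get(rule[0]) + " and " + rule[1]);
    }
    hMap.put(rule[0], rule[1]);
}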
Example 6: MultinomialHMM
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
public MultinomialHMM (int numberOfTopics, String topicsFilename, int numStates) throws IOException {
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
System.out.println("LDA HMM: " + numberOfTopics);
documentTopics = new IntObjectOpenHashMap<IntIntOpenHashMap>();
this.numTopics = numberOfTopics;
this.alphaSum = numberOfTopics;
this.alpha = new double[numberOfTopics];
Arrays.fill(alpha, alphaSum / numTopics);
topicKeys = new String[numTopics];
// This initializes numDocs as well
loadTopicsFromFile(topicsFilename);
documentStates = new int[ numDocs ];
documentSequenceIDs = new int[ numDocs ];
maxTokensPerTopic = new int[ numTopics ];
maxDocLength = 0;
//int[] histogram = new int[380];
//int totalTokens = 0;
for (int doc=0; doc < numDocs; doc++) {
if (! documentTopics.containsKey(doc)) { continue; }
IntIntOpenHashMap topicCounts = documentTopics.get(doc);
int count = 0;
for (int topic: topicCounts.keys().toArray()) {
int topicCount = topicCounts.get(topic);
//histogram[topicCount]++;
//totalTokens += topicCount;
if (topicCount > maxTokensPerTopic[topic]) {
maxTokensPerTopic[topic] = topicCount;
}
count += topicCount;
}
if (count > maxDocLength) {
maxDocLength = count;
}
}
/*
double runningTotal = 0.0;
for (int i=337; i >= 0; i--) {
runningTotal += i * histogram[i];
System.out.format("%d\t%d\t%.3f\n", i, histogram[i],
runningTotal / totalTokens);
}
*/
this.numStates = numStates;
this.initialStateCounts = new int[numStates];
topicLogGammaCache = new double[numStates][numTopics][];
for (int state=0; state < numStates; state++) {
for (int topic=0; topic < numTopics; topic++) {
topicLogGammaCache[state][topic] = new double[ maxTokensPerTopic[topic] + 1 ];
//topicLogGammaCache[state][topic] = new double[21];
}
}
System.out.println( maxDocLength );
docLogGammaCache = new double[numStates][ maxDocLength + 1 ];
}
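A hedged sketch of the nested structure the constructor iterates over: an outer IntObjectOpenHashMap keyed by document id, each value an IntIntOpenHashMap of per-topic token counts (presumably what loadTopicsFromFile fills in).

IntObjectOpenHashMap<IntIntOpenHashMap> documentTopics = new IntObjectOpenHashMap<IntIntOpenHashMap>();
IntIntOpenHashMap doc0 = new IntIntOpenHashMap();
doc0.putOrAdd(3, 1, 1);   // one token of topic 3 in document 0
doc0.putOrAdd(3, 1, 1);   // a second token of topic 3
doc0.putOrAdd(5, 1, 1);   // one token of topic 5
documentTopics.put(0, doc0);
for (int topic : doc0.keys().toArray()) {
    System.out.println(topic + " -> " + doc0.get(topic));   // 3 -> 2, 5 -> 1
}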
Example 7: sampleTopicsForOneTestDocAll
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
private void sampleTopicsForOneTestDocAll(FeatureSequence tokenSequence,
LabelSequence topicSequence) {
// TODO Auto-generated method stub
int[] oneDocTopics = topicSequence.getFeatures();
IntIntOpenHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double tw;
double[] topicWeights = new double[numTopics];
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// populate topic counts
int[] localTopicCounts = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++){
localTopicCounts[ti] = 0;
}
for (int position = 0; position < docLength; position++) {
localTopicCounts[oneDocTopics[position]] ++;
}
// Iterate over the positions (words) in the document
for (int si = 0; si < docLength; si++) {
type = tokenSequence.getIndexAtPosition(si);
oldTopic = oneDocTopics[si];
// Remove this token from all counts
localTopicCounts[oldTopic] --;
currentTypeTopicCounts = typeTopicCounts[type];
assert(currentTypeTopicCounts.get(oldTopic) >= 0);
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.addTo(oldTopic, -1);
}
tokensPerTopic[oldTopic]--;
// Build a distribution over topics for this token
Arrays.fill (topicWeights, 0.0);
topicWeightsSum = 0;
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
* ((localTopicCounts[ti] + alpha[ti])); // (/docLen-1+tAlpha); is constant across all topics
topicWeightsSum += tw;
topicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
currentTypeTopicCounts.putOrAdd(newTopic, 1, 1);
localTopicCounts[newTopic] ++;
tokensPerTopic[newTopic]++;
}
}
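Examples 8 and 9 repeat the same counting idiom, so it is worth isolating once (a hedged sketch, not project code): get returning 0 for an absent topic lets the smoothed weight (count + beta) be computed without containsKey checks, while remove keeps zero counts out of the map entirely.

IntIntOpenHashMap topicCounts = new IntIntOpenHashMap();
topicCounts.putOrAdd(2, 1, 1);            // count for topic 2 becomes 1
topicCounts.putOrAdd(2, 1, 1);            // ... and now 2
if (topicCounts.get(2) == 1) {
    topicCounts.remove(2);                // a decrement to zero removes the entry
} else {
    topicCounts.addTo(2, -1);             // otherwise just decrement
}
System.out.println(topicCounts.get(2));   // 1
System.out.println(topicCounts.get(9));   // 0, although topic 9 was never stored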
Example 8: sampleTopicsForOneTestDoc
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
private void sampleTopicsForOneTestDoc(FeatureSequence tokenSequence,
LabelSequence topicSequence) {
// TODO Auto-generated method stub
int[] oneDocTopics = topicSequence.getFeatures();
IntIntOpenHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double tw;
double[] topicWeights = new double[numTopics];
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// populate topic counts
int[] localTopicCounts = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++){
localTopicCounts[ti] = 0;
}
for (int position = 0; position < docLength; position++) {
if(oneDocTopics[position] != -1) {
localTopicCounts[oneDocTopics[position]] ++;
}
}
// Iterate over the positions (words) in the document
for (int si = 0; si < docLength; si++) {
type = tokenSequence.getIndexAtPosition(si);
oldTopic = oneDocTopics[si];
if(oldTopic == -1) {
continue;
}
// Remove this token from all counts
localTopicCounts[oldTopic] --;
currentTypeTopicCounts = typeTopicCounts[type];
assert(currentTypeTopicCounts.get(oldTopic) >= 0);
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.addTo(oldTopic, -1);
}
tokensPerTopic[oldTopic]--;
// Build a distribution over topics for this token
Arrays.fill (topicWeights, 0.0);
topicWeightsSum = 0;
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
* ((localTopicCounts[ti] + alpha[ti])); // (/docLen-1+tAlpha); is constant across all topics
topicWeightsSum += tw;
topicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
currentTypeTopicCounts.putOrAdd(newTopic, 1, 1);
localTopicCounts[newTopic] ++;
tokensPerTopic[newTopic]++;
}
}
Example 9: sampleTopicsForOneDocWithTheta
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
private void sampleTopicsForOneDocWithTheta(FeatureSequence tokenSequence,
LabelSequence topicSequence, double[] topicDistribution) {
// TODO Auto-generated method stub
int[] oneDocTopics = topicSequence.getFeatures();
IntIntOpenHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double tw;
double[] topicWeights = new double[numTopics];
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// Iterate over the positions (words) in the document
for (int si = 0; si < docLength; si++) {
type = tokenSequence.getIndexAtPosition(si);
oldTopic = oneDocTopics[si];
if(oldTopic == -1) {
continue;
}
currentTypeTopicCounts = typeTopicCounts[type];
assert(currentTypeTopicCounts.get(oldTopic) >= 0);
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.addTo(oldTopic, -1);
}
tokensPerTopic[oldTopic]--;
// Build a distribution over topics for this token
Arrays.fill (topicWeights, 0.0);
topicWeightsSum = 0;
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
* topicDistribution[ti]; // (/docLen-1+tAlpha); is constant across all topics
topicWeightsSum += tw;
topicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
currentTypeTopicCounts.putOrAdd(newTopic, 1, 1);
tokensPerTopic[newTopic]++;
}
}
Example 10: dirichletMultinomialLikelihoodRatio
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
/** What is the probability that these two observations were drawn from
* the same multinomial with symmetric Dirichlet prior alpha, relative
* to the probability that they were drawn from different multinomials
* both drawn from this Dirichlet?
*/
public static double dirichletMultinomialLikelihoodRatio(IntIntOpenHashMap countsX,
IntIntOpenHashMap countsY,
double alpha, double alphaSum) {
// The likelihood for one DCM is
//
//   [ Gamma(alpha_sum) / prod_i Gamma(alpha) ] * [ prod_i Gamma(alpha + N_i) / Gamma(alpha_sum + N) ]
//
// When we divide this by the product of two other DCMs with the same
// alpha parameter, the first term in the numerator cancels with the
// first term in the denominator. Then moving the remaining alpha-only
// term to the numerator, we get
//
//    prod_i Gamma(alpha) * prod_i Gamma(alpha + X_i + Y_i) * Gamma(alpha_sum + X_sum) * Gamma(alpha_sum + Y_sum)
//   ----------------------------------------------------------------------------------------------------------
//    Gamma(alpha_sum) * Gamma(alpha_sum + X_sum + Y_sum) * prod_i Gamma(alpha + X_i) * prod_i Gamma(alpha + Y_i)
double logLikelihood = 0.0;
double logGammaAlpha = logGamma(alpha);
int totalX = 0;
int totalY = 0;
int key, x, y;
IntOpenHashSet distinctKeys = new IntOpenHashSet();
distinctKeys.addAll(countsX.keys());
distinctKeys.addAll(countsY.keys());
Iterator<IntCursor> iterator = distinctKeys.iterator();
while (iterator.hasNext()) {
key = iterator.next().value;
x = 0;
if (countsX.containsKey(key)) {
x = countsX.get(key);
}
y = 0;
if (countsY.containsKey(key)) {
y = countsY.get(key);
}
totalX += x;
totalY += y;
logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
- logGamma(alpha + x) - logGamma(alpha + y);
}
logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
- logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);
return logLikelihood;
}
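A hypothetical call (the count values and the 50-dimensional symmetric prior are invented for illustration); larger returned values favour the hypothesis that both count vectors came from the same multinomial.

IntIntOpenHashMap countsX = new IntIntOpenHashMap();
countsX.put(0, 5);
countsX.put(1, 1);
IntIntOpenHashMap countsY = new IntIntOpenHashMap();
countsY.put(0, 4);
countsY.put(2, 2);
double alpha = 0.1;
double alphaSum = alpha * 50;             // alpha summed over all 50 dimensions
double logRatio = dirichletMultinomialLikelihoodRatio(countsX, countsY, alpha, alphaSum);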
Example 11: trimFeatures
import com.carrotsearch.hppc.IntIntOpenHashMap; // the package/class this method depends on
public void trimFeatures(Logger log, float threshold)
{
FloatArrayList tWeights = new FloatArrayList(f_weights.size());
IntIntOpenHashMap map = new IntIntOpenHashMap();
ObjectIntHashMap<String> m;
int i, j, tFeatures = 1;
boolean trim;
String s;
log.info("Trimming: ");
// bias
for (j=0; j<n_labels; j++)
tWeights.add(f_weights.get(j));
// rest
for (i=1; i<n_features; i++)
{
trim = true;
for (j=0; j<n_labels; j++)
{
if (Math.abs(f_weights.get(i*n_labels+j)) > threshold)
{
trim = false;
break;
}
}
if (!trim)
{
map.put(i, tFeatures++);
for (j=0; j<n_labels; j++)
tWeights.add(f_weights.get(i*n_labels+j));
}
}
log.info(String.format("%d -> %d\n", n_features, tFeatures));
tWeights.trimToSize();
// map
for (String type : Lists.newArrayList(m_features.keySet()))
{
m = m_features.get(type);
for (ObjectIntPair<String> p : m.toList())
{
i = map.get(p.i);
s = (String)p.o;
if (i > 0) m.put(s, i);
else m.remove(s);
}
if (m.isEmpty())
m_features.remove(type);
}
f_weights = tWeights;
n_features = tFeatures;
}
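One detail of the idiom above is worth noting (a hedged sketch with invented values): new feature indices start at 1 (tFeatures is initialized to 1, reserving index 0 for the bias), so map.get(oldIndex) == 0, the default value returned for absent keys, doubles as the marker for a trimmed feature.

IntIntOpenHashMap map = new IntIntOpenHashMap();
map.put(3, 1);                       // feature 3 survives as new index 1
System.out.println(map.get(3));      // 1 -> keep, re-point the feature string to index 1
System.out.println(map.get(8));      // 0 -> feature 8 was trimmed, remove its string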