

Java TIntIntHashMap.get Method Code Examples

This article collects typical usage examples of the Java method gnu.trove.map.hash.TIntIntHashMap.get. If you are wondering what TIntIntHashMap.get does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples of the enclosing class, gnu.trove.map.hash.TIntIntHashMap.


The following presents 14 code examples of the TIntIntHashMap.get method, sorted by popularity by default.
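Before the examples, here is a minimal stand-alone sketch of the basic get pattern; the class name and the key/value numbers are hypothetical, and it only assumes Trove 3 is on the classpath. For a missing key, get returns the map's no-entry value, which is 0 unless configured otherwise.

import gnu.trove.map.hash.TIntIntHashMap;

public class GetBasicsSketch {
    public static void main(String[] args) {
        TIntIntHashMap counts = new TIntIntHashMap();
        counts.put(7, 42);

        System.out.println(counts.get(7));          // 42: value for a present key
        System.out.println(counts.get(99));         // 0: the default no-entry value
        System.out.println(counts.containsKey(99)); // false: distinguishes "missing" from a stored 0
    }
}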

Example 1: notUseAfterLastDef

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private boolean notUseAfterLastDef(int reg,
        MachineBasicBlock mbb,
        int dist,
        TIntIntHashMap distanceMap,
        OutParamWrapper<Integer> lastDef)
{
    lastDef.set(0);
    int lastUse = dist;
    for (DefUseChainIterator itr = mri.getRegIterator(reg); itr.hasNext();)
    {
        MachineOperand mo = itr.getOpearnd();
        MachineInstr mi = itr.getMachineInstr();
        if (!mi.getParent().equals(mbb))
            continue;
        if (!distanceMap.containsKey(mi.index()))
            continue;

        if (mo.isUse() && distanceMap.get(mi.index()) < lastUse)
            lastUse = distanceMap.get(mi.index());
        if (mo.isDef() && distanceMap.get(mi.index()) > lastDef.get())
            lastDef.set(distanceMap.get(mi.index()));
        itr.next();
    }
    return (!(lastUse > lastDef.get() && lastUse < dist));
}
 
Developer ID: JianpingZeng, Project: xcc, Lines of code: 26, Source: TwoAddrInstructionPass.java

Example 2: train

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
@Override
public <O, A> void train(List<? extends Annotated<O, A>> data) {
	TIntIntHashMap nAnnotationCounts = new TIntIntHashMap();
	int maxVal = 0;
	
	for (Annotated<O, A> sample : data) {
		Collection<A> annos = sample.getAnnotations();

		nAnnotationCounts.adjustOrPutValue(annos.size(), 1, 1);
		
		if (annos.size()>maxVal) maxVal = annos.size();
	}
	
	//build distribution and rng for the number of annotations
	double [] distr = new double[maxVal+1];
	for (int i=0; i<=maxVal; i++) 
		distr[i] = nAnnotationCounts.get(i);
	numAnnotations = new EmpiricalWalker(distr, Empirical.NO_INTERPOLATION, new MersenneTwister());
}
 
Developer ID: openimaj, Project: openimaj, Lines of code: 20, Source: PriorChooser.java
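The train method above leans on two Trove behaviors: adjustOrPutValue(key, adjust, put) increments an existing count or inserts a fresh one, and get returns 0 for annotation counts that never occurred, so gaps in the distribution array simply stay at zero. Below is a stand-alone sketch of the same counting pattern; the class name and sample data are hypothetical.

import gnu.trove.map.hash.TIntIntHashMap;

public class AnnotationHistogramSketch {
    public static void main(String[] args) {
        int[] annotationsPerSample = {2, 3, 3, 5}; // hypothetical per-sample annotation counts
        TIntIntHashMap nAnnotationCounts = new TIntIntHashMap();
        int maxVal = 0;

        for (int n : annotationsPerSample) {
            nAnnotationCounts.adjustOrPutValue(n, 1, 1); // +1 if present, otherwise insert 1
            if (n > maxVal) maxVal = n;
        }

        double[] distr = new double[maxVal + 1];
        for (int i = 0; i <= maxVal; i++) {
            distr[i] = nAnnotationCounts.get(i); // absent counts come back as 0
        }
        // distr is {0.0, 0.0, 1.0, 2.0, 0.0, 1.0}
    }
}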

Example 3: isProfitableToReMat

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private boolean isProfitableToReMat(int reg,
        TargetRegisterClass rc,
        MachineInstr mi,
        MachineInstr defMI,
        MachineBasicBlock mbb,
        int loc,
        TIntIntHashMap distanceMap)
{
    boolean otherUse = false;
    for (DefUseChainIterator itr = mri.getUseIterator(reg); itr.hasNext(); )
    {
        MachineOperand useMO = itr.getOpearnd();
        MachineInstr useMI = itr.getMachineInstr();
        MachineBasicBlock useBB = useMI.getParent();
        if (useBB.equals(mbb))
        {
            int idx = useMI.index();
            if (distanceMap.containsKey(idx) && distanceMap.get(idx) == loc)
            {
                otherUse = true;
                if (isTwoAddrUse(useMI, reg))
                    return true;
            }
        }
    }

    if (otherUse)
        return false;

    return mbb.equals(defMI.getParent());
}
 
Developer ID: JianpingZeng, Project: xcc, Lines of code: 32, Source: TwoAddrInstructionPass.java

Example 4: merge

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private void merge(TIntIntHashMap linked, int start, int target) {
	if (start == target)
		return;

	final int old = linked.get(start);

	if (old > target) {
		linked.put(start, target);
		merge(linked, old, target);
	} else {
		merge(linked, target, old);
	}
}
 
Developer ID: openimaj, Project: openimaj, Lines of code: 14, Source: ConnectedComponentLabeler.java

Example 5: getCharWidth

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private static int getCharWidth(Font font, char ch, TIntIntHashMap map) {
	int width = map.get(ch);
	if (width == 0) {
		width = Fonts.getFontMetrics(font).charWidth(ch);
		if (width == 0) {
			width = 1;
		}
		map.put(ch, width);
	}
	return width;
}
 
Developer ID: Ayutac, Project: toolkit, Lines of code: 12, Source: TextDrawing.java
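getCharWidth works because 0 doubles as Trove's default no-entry value and as the "not cached yet" sentinel, which is why the method always stores a width of at least 1. If 0 had to be a legitimate cached value, the no-entry value can be chosen explicitly in the constructor; the following is a small sketch of that option (the class name is hypothetical, and it uses Trove's own default capacity and load factor constants).

import gnu.trove.impl.Constants;
import gnu.trove.map.hash.TIntIntHashMap;

public class ExplicitNoEntryValueSketch {
    public static void main(String[] args) {
        // last argument: the value get() returns for keys that are not in the map
        TIntIntHashMap widths = new TIntIntHashMap(
                Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, 0, -1);

        widths.put('i', 0);                   // a genuine zero-width entry
        System.out.println(widths.get('i'));  // 0, the stored value
        System.out.println(widths.get('m'));  // -1, meaning "not cached"
    }
}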

Example 6: get

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
public int get(final String groupId, final int timepoint) {
	final TIntIntHashMap countResult = countResults.get(groupId);
	if (countResult == null) {
		return defaultValue;
	} else {
		return countResult.get(timepoint);
	}
}
 
Developer ID: pmeisen, Project: dis-timeintervaldataanalyzer, Lines of code: 9, Source: CountValuesCollection.java

Example 7: buildFeatureVector

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private void buildFeatureVector(int item_id, TIntIntHashMap paths) {

        try {
            double rate = 0;
            double n = 1;
            boolean training = false, validation = false;
            DecimalFormat form = new DecimalFormat("#.####");
            form.setRoundingMode(RoundingMode.CEILING);
            StringBuffer str = new StringBuffer();

            if (trainRatings.containsKey(item_id)) {
                training = true;
                rate = trainRatings.get(item_id);
            } else if (validationRatings.containsKey(item_id)) {
                validation = true;
                rate = validationRatings.get(item_id);
            }

            if (normalize)
                n = norm(paths);

            str.append(rate + " qid:" + user_id + " 1:" + item_id + " ");

            for (int i = 1; i <= path_index.size(); i++) {

                int count = 0;

                if (paths.size() == 1)
                    n = norm(paths);

                if (paths.containsKey(i)) {

                    count = paths.get(i);

                    if (normalize)
                        str.append(i + 1 + ":"
                                + form.format(count / n).replace(",", ".")
                                + " ");
                    else
                        str.append(i + 1 + ":" + count + " ");

                }
            }

            if (training) {
                synchronized (train_file) {
                    train_file.append(str);
                    train_file.newLine();
                }
            } else if (validation) {
                synchronized (validation_file) {
                    validation_file.append(str);
                    validation_file.newLine();
                }
            } else {
                synchronized (test_file) {
                    test_file.append(str);
                    test_file.newLine();
                }
            }

        } catch (Exception e) {
            e.printStackTrace();
        }
    }
 
Developer ID: sisinflab, Project: lodreclib, Lines of code: 66, Source: UserPathExtractorWorker.java

Example 8: MultinomialHMM

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
public MultinomialHMM(int numberOfTopics, String topicsFilename, int numStates) throws IOException {
	formatter = NumberFormat.getInstance();
	formatter.setMaximumFractionDigits(5);

	System.out.println("LDA HMM: " + numberOfTopics);

	documentTopics = new TIntObjectHashMap<TIntIntHashMap>();

	this.numTopics = numberOfTopics;
	this.alphaSum = numberOfTopics;
	this.alpha = new double[numberOfTopics];
	Arrays.fill(alpha, alphaSum / numTopics);

	topicKeys = new String[numTopics];

	// This initializes numDocs as well
	loadTopicsFromFile(topicsFilename);

	documentStates = new int[numDocs];
	documentSequenceIDs = new int[numDocs];

	maxTokensPerTopic = new int[numTopics];
	maxDocLength = 0;

	//int[] histogram = new int[380];
	//int totalTokens = 0;

	for (int doc = 0; doc < numDocs; doc++) {
		if (!documentTopics.containsKey(doc)) { continue; }

		TIntIntHashMap topicCounts = documentTopics.get(doc);

		int count = 0;
		for (int topic : topicCounts.keys()) {
			int topicCount = topicCounts.get(topic);
			//histogram[topicCount]++;
			//totalTokens += topicCount;

			if (topicCount > maxTokensPerTopic[topic]) {
				maxTokensPerTopic[topic] = topicCount;
			}
			count += topicCount;
		}
		if (count > maxDocLength) {
			maxDocLength = count;
		}
	}

	/*
	double runningTotal = 0.0;
	for (int i = 337; i >= 0; i--) {
		runningTotal += i * histogram[i];
		System.out.format("%d\t%d\t%.3f\n", i, histogram[i],
				runningTotal / totalTokens);
	}
	*/

	this.numStates = numStates;
	this.initialStateCounts = new int[numStates];

	topicLogGammaCache = new double[numStates][numTopics][];
	for (int state = 0; state < numStates; state++) {
		for (int topic = 0; topic < numTopics; topic++) {
			topicLogGammaCache[state][topic] = new double[maxTokensPerTopic[topic] + 1];
			//topicLogGammaCache[state][topic] = new double[21];
		}
	}
	System.out.println(maxDocLength);
	docLogGammaCache = new double[numStates][maxDocLength + 1];
}
 
Developer ID: iamxiatian, Project: wikit, Lines of code: 73, Source: MultinomialHMM.java

Example 9: sampleTopicsForOneTestDocAll

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private void sampleTopicsForOneTestDocAll(FeatureSequence tokenSequence,
		LabelSequence topicSequence) {
	// TODO Auto-generated method stub
	int[] oneDocTopics = topicSequence.getFeatures();

	TIntIntHashMap currentTypeTopicCounts;
	int type, oldTopic, newTopic;
	double tw;
	double[] topicWeights = new double[numTopics];
	double topicWeightsSum;
	int docLength = tokenSequence.getLength();

	//		populate topic counts
	int[] localTopicCounts = new int[numTopics];
	for (int ti = 0; ti < numTopics; ti++){
		localTopicCounts[ti] = 0;
	}
	for (int position = 0; position < docLength; position++) {
		localTopicCounts[oneDocTopics[position]] ++;
	}

	// Iterate over the positions (words) in the document
	for (int si = 0; si < docLength; si++) {
		type = tokenSequence.getIndexAtPosition(si);
		oldTopic = oneDocTopics[si];

		// Remove this token from all counts
		localTopicCounts[oldTopic] --;

		currentTypeTopicCounts = typeTopicCounts[type];
		assert(currentTypeTopicCounts.get(oldTopic) >= 0);

		if (currentTypeTopicCounts.get(oldTopic) == 1) {
			currentTypeTopicCounts.remove(oldTopic);
		}
		else {
			currentTypeTopicCounts.adjustValue(oldTopic, -1);
		}
		tokensPerTopic[oldTopic]--;

		// Build a distribution over topics for this token
		Arrays.fill (topicWeights, 0.0);
		topicWeightsSum = 0;

		for (int ti = 0; ti < numTopics; ti++) {
			tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
			      * ((localTopicCounts[ti] + alpha[ti])); // (/docLen-1+tAlpha); is constant across all topics
			topicWeightsSum += tw;
			topicWeights[ti] = tw;
		}
		// Sample a topic assignment from this distribution
		newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

		// Put that new topic into the counts
		oneDocTopics[si] = newTopic;
		currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
		localTopicCounts[newTopic] ++;
		tokensPerTopic[newTopic]++;
	}
}
 
Developer ID: iamxiatian, Project: wikit, Lines of code: 61, Source: LDAStream.java
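The Gibbs-sampling loops in examples 9 through 11 share one get-centered idiom: a key is decremented with adjustValue and removed outright when its count reaches zero, so the typeTopicCounts maps only ever hold strictly positive counts. Here is a stand-alone sketch of that idiom with hypothetical class and method names.

import gnu.trove.map.hash.TIntIntHashMap;

public class DecrementOrRemoveSketch {
    static void decrement(TIntIntHashMap counts, int key) {
        // assumes the key is currently present with a positive count
        if (counts.get(key) == 1) {
            counts.remove(key);          // last occurrence: drop the key entirely
        } else {
            counts.adjustValue(key, -1); // otherwise decrement in place
        }
    }

    public static void main(String[] args) {
        TIntIntHashMap counts = new TIntIntHashMap();
        counts.adjustOrPutValue(5, 1, 1);
        counts.adjustOrPutValue(5, 1, 1);
        decrement(counts, 5);
        System.out.println(counts.get(5));         // 1
        decrement(counts, 5);
        System.out.println(counts.containsKey(5)); // false
    }
}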

Example 10: sampleTopicsForOneTestDoc

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private void sampleTopicsForOneTestDoc(FeatureSequence tokenSequence,
		LabelSequence topicSequence) {
	// TODO Auto-generated method stub
	int[] oneDocTopics = topicSequence.getFeatures();

	TIntIntHashMap currentTypeTopicCounts;
	int type, oldTopic, newTopic;
	double tw;
	double[] topicWeights = new double[numTopics];
	double topicWeightsSum;
	int docLength = tokenSequence.getLength();

	//		populate topic counts
	int[] localTopicCounts = new int[numTopics];
	for (int ti = 0; ti < numTopics; ti++){
		localTopicCounts[ti] = 0;
	}
	for (int position = 0; position < docLength; position++) {
		if(oneDocTopics[position] != -1) {
			localTopicCounts[oneDocTopics[position]] ++;
		}
	}

	// Iterate over the positions (words) in the document
	for (int si = 0; si < docLength; si++) {
		type = tokenSequence.getIndexAtPosition(si);
		oldTopic = oneDocTopics[si];
		if(oldTopic == -1) {
			continue;
		}

		// Remove this token from all counts
		localTopicCounts[oldTopic] --;
		currentTypeTopicCounts = typeTopicCounts[type];
		assert(currentTypeTopicCounts.get(oldTopic) >= 0);

		if (currentTypeTopicCounts.get(oldTopic) == 1) {
			currentTypeTopicCounts.remove(oldTopic);
		}
		else {
			currentTypeTopicCounts.adjustValue(oldTopic, -1);
		}
		tokensPerTopic[oldTopic]--;

		// Build a distribution over topics for this token
		Arrays.fill (topicWeights, 0.0);
		topicWeightsSum = 0;

		for (int ti = 0; ti < numTopics; ti++) {
			tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
			      * ((localTopicCounts[ti] + alpha[ti])); // (/docLen-1+tAlpha); is constant across all topics
			topicWeightsSum += tw;
			topicWeights[ti] = tw;
		}
		// Sample a topic assignment from this distribution
		newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

		// Put that new topic into the counts
		oneDocTopics[si] = newTopic;
		currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
		localTopicCounts[newTopic] ++;
		tokensPerTopic[newTopic]++;
	}
}
 
Developer ID: iamxiatian, Project: wikit, Lines of code: 65, Source: LDAStream.java

Example 11: sampleTopicsForOneDocWithTheta

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
private void sampleTopicsForOneDocWithTheta(FeatureSequence tokenSequence,
		LabelSequence topicSequence, double[] topicDistribution) {
	// TODO Auto-generated method stub
	int[] oneDocTopics = topicSequence.getFeatures();

	TIntIntHashMap currentTypeTopicCounts;
	int type, oldTopic, newTopic;
	double tw;
	double[] topicWeights = new double[numTopics];
	double topicWeightsSum;
	int docLength = tokenSequence.getLength();
	
	// Iterate over the positions (words) in the document
	for (int si = 0; si < docLength; si++) {
		type = tokenSequence.getIndexAtPosition(si);
		oldTopic = oneDocTopics[si];
		if(oldTopic == -1) {
			continue;
		}

		currentTypeTopicCounts = typeTopicCounts[type];
		assert(currentTypeTopicCounts.get(oldTopic) >= 0);

		if (currentTypeTopicCounts.get(oldTopic) == 1) {
			currentTypeTopicCounts.remove(oldTopic);
		}
		else {
			currentTypeTopicCounts.adjustValue(oldTopic, -1);
		}
		tokensPerTopic[oldTopic]--;

		// Build a distribution over topics for this token
		Arrays.fill (topicWeights, 0.0);
		topicWeightsSum = 0;

		for (int ti = 0; ti < numTopics; ti++) {
			tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
			      * topicDistribution[ti]; // (/docLen-1+tAlpha); is constant across all topics
			topicWeightsSum += tw;
			topicWeights[ti] = tw;
		}
		// Sample a topic assignment from this distribution
		newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

		// Put that new topic into the counts
		oneDocTopics[si] = newTopic;
		currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
		tokensPerTopic[newTopic]++;
	}
}
 
Developer ID: iamxiatian, Project: wikit, Lines of code: 51, Source: LDAStream.java

Example 12: dirichletMultinomialLikelihoodRatio

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
/** What is the probability that these two observations were drawn from
	 *	the same multinomial with symmetric Dirichlet prior alpha, relative 
	 *	to the probability that they were drawn from different multinomials
	 *	both drawn from this Dirichlet?
	 */
	public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX,
			TIntIntHashMap countsY,
			double alpha, double alphaSum) {
//		The likelihood for one DCM is 
//		Gamma( alpha_sum )	 prod Gamma( alpha + N_i )
//		prod Gamma ( alpha )   Gamma ( alpha_sum + N )

//		When we divide this by the product of two other DCMs with the same
//		alpha parameter, the first term in the numerator cancels with the 
//		first term in the denominator. Then moving the remaining alpha-only
//		term to the numerator, we get
//		prod Gamma(alpha)	  prod Gamma( alpha + X_i + Y_i )
//		Gamma (alpha_sum)	 Gamma( alpha_sum + X_sum + Y_sum )
//		----------------------------------------------------------
//		prod Gamma(alpha + X_i)		  prod Gamma(alpha + Y_i)
//		Gamma( alpha_sum + X_sum )	  Gamma( alpha_sum + Y_sum )


		double logLikelihood = 0.0;
		double logGammaAlpha = logGamma(alpha);

		int totalX = 0;
		int totalY = 0;

		int key, x, y;

		TIntHashSet distinctKeys = new TIntHashSet();
		distinctKeys.addAll(countsX.keys());
		distinctKeys.addAll(countsY.keys());

		TIntIterator iterator = distinctKeys.iterator();
		while (iterator.hasNext()) {
			key = iterator.next();

			x = 0;
			if (countsX.containsKey(key)) {
				x = countsX.get(key);
			}

			y = 0;
			if (countsY.containsKey(key)) {
				y = countsY.get(key);
			}

			totalX += x;
			totalY += y;

			logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
			- logGamma(alpha + x) - logGamma(alpha + y);
		}

		logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY) 
		- logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);

		return logLikelihood;
	}
 
Developer ID: iamxiatian, Project: wikit, Lines of code: 62, Source: Dirichlet.java
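Restated in conventional notation, the quantity accumulated by dirichletMultinomialLikelihoodRatio (this example and the two near-identical variants that follow) is the log of the ratio sketched in the comment block, where x_i and y_i are the per-key counts read via get, X and Y are their totals, alpha is the symmetric prior, and alphaSum is written \alpha_{\Sigma}:

\log \Lambda
  = \sum_{i} \Bigl[ \log\Gamma(\alpha) + \log\Gamma(\alpha + x_i + y_i)
      - \log\Gamma(\alpha + x_i) - \log\Gamma(\alpha + y_i) \Bigr]
  + \log\Gamma(\alpha_{\Sigma} + X) + \log\Gamma(\alpha_{\Sigma} + Y)
  - \log\Gamma(\alpha_{\Sigma}) - \log\Gamma(\alpha_{\Sigma} + X + Y)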

Example 13: dirichletMultinomialLikelihoodRatio

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
/** What is the probability that these two observations were drawn from
	 *	the same multinomial with symmetric Dirichlet prior alpha, relative 
	 *	to the probability that they were drawn from different multinomials
	 *	both drawn from this Dirichlet?
	 */
	public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX,
			TIntIntHashMap countsY,
			double alpha, double alphaSum) {
//		The likelihood for one DCM is 
//		Gamma( alpha_sum )	 prod Gamma( alpha + N_i )
//		prod Gamma ( alpha )   Gamma ( alpha_sum + N )

//		When we divide this by the product of two other DCMs with the same
//		alpha parameter, the first term in the numerator cancels with the 
//		first term in the denominator. Then moving the remaining alpha-only
//		term to the numerator, we get
//		prod Gamma(alpha)	  prod Gamma( alpha + X_i + Y_i )
//		Gamma (alpha_sum)	 Gamma( alpha_sum + X_sum + Y_sum )
//		----------------------------------------------------------
//		prod Gamma(alpha + X_i)		  prod Gamma(alpha + Y_i)
//		Gamma( alpha_sum + X_sum )	  Gamma( alpha_sum + Y_sum )


		double logLikelihood = 0.0;
		//double logGammaAlpha = logGamma(alpha);

		int totalX = 0;
		int totalY = 0;

		int key, x, y;

		TIntHashSet distinctKeys = new TIntHashSet();
		distinctKeys.addAll(countsX.keys());
		distinctKeys.addAll(countsY.keys());

		TIntIterator iterator = distinctKeys.iterator();
		while (iterator.hasNext()) {
			key = iterator.next();

			x = 0;
			if (countsX.containsKey(key)) {
				x = countsX.get(key);
			}

			y = 0;
			if (countsY.containsKey(key)) {
				y = countsY.get(key);
			}

			totalX += x;
			totalY += y;

			logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
			- logGamma(alpha + x) - logGamma(alpha + y);
		}

		logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY) 
		- logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);

		return logLikelihood;
	}
 
Developer ID: tweninger, Project: nina, Lines of code: 62, Source: Dirichlet.java

Example 14: dirichletMultinomialLikelihoodRatio

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
/**
     * What is the probability that these two observations were drawn from the
     * same multinomial with symmetric Dirichlet prior alpha, relative to the
     * probability that they were drawn from different multinomials both drawn
     * from this Dirichlet?
     */
    public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX,
            TIntIntHashMap countsY,
            double alpha, double alphaSum) {
//		The likelihood for one DCM is 
//		Gamma( alpha_sum )	 prod Gamma( alpha + N_i )
//		prod Gamma ( alpha )   Gamma ( alpha_sum + N )

//		When we divide this by the product of two other DCMs with the same
//		alpha parameter, the first term in the numerator cancels with the 
//		first term in the denominator. Then moving the remaining alpha-only
//		term to the numerator, we get
//		prod Gamma(alpha)	  prod Gamma( alpha + X_i + Y_i )
//		Gamma (alpha_sum)	 Gamma( alpha_sum + X_sum + Y_sum )
//		----------------------------------------------------------
//		prod Gamma(alpha + X_i)		  prod Gamma(alpha + Y_i)
//		Gamma( alpha_sum + X_sum )	  Gamma( alpha_sum + Y_sum )
        double logLikelihood = 0.0;
        double logGammaAlpha = logGamma(alpha);

        int totalX = 0;
        int totalY = 0;

        int key, x, y;

        TIntHashSet distinctKeys = new TIntHashSet();
        distinctKeys.addAll(countsX.keys());
        distinctKeys.addAll(countsY.keys());

        TIntIterator iterator = distinctKeys.iterator();
        while (iterator.hasNext()) {
            key = iterator.next();

            x = 0;
            if (countsX.containsKey(key)) {
                x = countsX.get(key);
            }

            y = 0;
            if (countsY.containsKey(key)) {
                y = countsY.get(key);
            }

            totalX += x;
            totalY += y;

            logLikelihood += logGamma(alpha) + logGamma(alpha + x + y)
                    - logGamma(alpha + x) - logGamma(alpha + y);
        }

        logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
                - logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);

        return logLikelihood;
    }
 
Developer ID: hmetaxa, Project: MixLDA, Lines of code: 61, Source: Dirichlet.java


Note: The gnu.trove.map.hash.TIntIntHashMap.get examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.