

Java TIntIntHashMap.remove Method Code Examples

This article collects typical usage examples of the Java method gnu.trove.map.hash.TIntIntHashMap.remove. If you are wondering what TIntIntHashMap.remove does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples for the enclosing class, gnu.trove.map.hash.TIntIntHashMap.


Three code examples of the TIntIntHashMap.remove method are shown below, sorted by popularity by default.
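
Before turning to the project examples, here is a minimal standalone sketch of the idiom they all rely on: remove drops an entry entirely once its count would reach zero (so the map never stores zero values), while adjustValue and adjustOrPutValue handle plain decrements and increments. The class name and keys below are made up for illustration; only the Trove calls themselves come from the library.

import gnu.trove.map.hash.TIntIntHashMap;

public class TroveCountDemo {
	public static void main(String[] args) {
		TIntIntHashMap counts = new TIntIntHashMap();
		counts.put(7, 2);                          // key 7 observed twice

		// Decrement-or-remove: never leave a zero count in the map.
		if (counts.get(7) == 1) {
			counts.remove(7);                      // last occurrence -> drop the entry
		} else {
			counts.adjustValue(7, -1);             // otherwise just decrement
		}
		System.out.println(counts.get(7));         // 1
		System.out.println(counts.containsKey(7)); // true

		// Increment-or-insert: add 1 if the key exists, otherwise put 1.
		counts.adjustOrPutValue(7, 1, 1);
		counts.adjustOrPutValue(42, 1, 1);
		System.out.println(counts.get(7));         // 2
		System.out.println(counts.get(42));        // 1
	}
}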

Example 1: sampleTopicsForOneTestDocAll

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
import java.util.Arrays;                  // needed by Arrays.fill below
// FeatureSequence, LabelSequence, and fields such as typeTopicCounts, tokensPerTopic,
// alpha, beta, betaSum, and random belong to the enclosing LDAStream class (built on MALLET) and are not shown here.
private void sampleTopicsForOneTestDocAll(FeatureSequence tokenSequence,
		LabelSequence topicSequence) {
	// TODO Auto-generated method stub
	int[] oneDocTopics = topicSequence.getFeatures();

	TIntIntHashMap currentTypeTopicCounts;
	int type, oldTopic, newTopic;
	double tw;
	double[] topicWeights = new double[numTopics];
	double topicWeightsSum;
	int docLength = tokenSequence.getLength();

	//		populate topic counts
	int[] localTopicCounts = new int[numTopics];
	for (int ti = 0; ti < numTopics; ti++){
		localTopicCounts[ti] = 0;
	}
	for (int position = 0; position < docLength; position++) {
		localTopicCounts[oneDocTopics[position]] ++;
	}

	// Iterate over the positions (words) in the document
	for (int si = 0; si < docLength; si++) {
		type = tokenSequence.getIndexAtPosition(si);
		oldTopic = oneDocTopics[si];

		// Remove this token from all counts
		localTopicCounts[oldTopic] --;

		currentTypeTopicCounts = typeTopicCounts[type];
		assert(currentTypeTopicCounts.get(oldTopic) >= 0);

		if (currentTypeTopicCounts.get(oldTopic) == 1) {
			currentTypeTopicCounts.remove(oldTopic);
		}
		else {
			currentTypeTopicCounts.adjustValue(oldTopic, -1);
		}
		tokensPerTopic[oldTopic]--;

		// Build a distribution over topics for this token
		Arrays.fill (topicWeights, 0.0);
		topicWeightsSum = 0;

		for (int ti = 0; ti < numTopics; ti++) {
			tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
			      * (localTopicCounts[ti] + alpha[ti]); // dividing by (docLength-1 + sum of alpha) is skipped: it is constant across topics
			topicWeightsSum += tw;
			topicWeights[ti] = tw;
		}
		// Sample a topic assignment from this distribution
		newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

		// Put that new topic into the counts
		oneDocTopics[si] = newTopic;
		currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
		localTopicCounts[newTopic] ++;
		tokensPerTopic[newTopic]++;
	}
}
 
Developer: iamxiatian, Project: wikit, Lines: 61, Source: LDAStream.java
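
For reference, the per-token weight built in the loop above is the standard collapsed-Gibbs conditional for LDA. Reading the code, and assuming betaSum = beta * V with V the vocabulary size (that field belongs to the enclosing class and is not shown in the snippet), the weight of topic t is

p(z_i = t \mid \text{rest}) \propto \frac{n_{w,t} + \beta}{n_t + \beta V}\,(n_{d,t} + \alpha_t)

where n_{w,t} is currentTypeTopicCounts.get(t), n_t is tokensPerTopic[t], and n_{d,t} is localTopicCounts[t]. The document-length normalizer is the same for every topic, so the code omits it and passes the unnormalized weights to random.nextDiscrete.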

Example 2: sampleTopicsForOneTestDoc

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
import java.util.Arrays;                  // needed by Arrays.fill below
private void sampleTopicsForOneTestDoc(FeatureSequence tokenSequence,
		LabelSequence topicSequence) {
	// TODO Auto-generated method stub
	int[] oneDocTopics = topicSequence.getFeatures();

	TIntIntHashMap currentTypeTopicCounts;
	int type, oldTopic, newTopic;
	double tw;
	double[] topicWeights = new double[numTopics];
	double topicWeightsSum;
	int docLength = tokenSequence.getLength();

	//		populate topic counts
	int[] localTopicCounts = new int[numTopics];
	for (int ti = 0; ti < numTopics; ti++){
		localTopicCounts[ti] = 0;
	}
	for (int position = 0; position < docLength; position++) {
		if(oneDocTopics[position] != -1) {
			localTopicCounts[oneDocTopics[position]] ++;
		}
	}

	// Iterate over the positions (words) in the document
	for (int si = 0; si < docLength; si++) {
		type = tokenSequence.getIndexAtPosition(si);
		oldTopic = oneDocTopics[si];
		if(oldTopic == -1) {
			continue;
		}

		// Remove this token from all counts
		localTopicCounts[oldTopic] --;
		currentTypeTopicCounts = typeTopicCounts[type];
		assert(currentTypeTopicCounts.get(oldTopic) >= 0);

		if (currentTypeTopicCounts.get(oldTopic) == 1) {
			currentTypeTopicCounts.remove(oldTopic);
		}
		else {
			currentTypeTopicCounts.adjustValue(oldTopic, -1);
		}
		tokensPerTopic[oldTopic]--;

		// Build a distribution over topics for this token
		Arrays.fill (topicWeights, 0.0);
		topicWeightsSum = 0;

		for (int ti = 0; ti < numTopics; ti++) {
			tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
			      * (localTopicCounts[ti] + alpha[ti]); // dividing by (docLength-1 + sum of alpha) is skipped: it is constant across topics
			topicWeightsSum += tw;
			topicWeights[ti] = tw;
		}
		// Sample a topic assignment from this distribution
		newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

		// Put that new topic into the counts
		oneDocTopics[si] = newTopic;
		currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
		localTopicCounts[newTopic] ++;
		tokensPerTopic[newTopic]++;
	}
}
 
Developer: iamxiatian, Project: wikit, Lines: 65, Source: LDAStream.java

Example 3: sampleTopicsForOneDocWithTheta

import gnu.trove.map.hash.TIntIntHashMap; // import the package/class the method depends on
import java.util.Arrays;                  // needed by Arrays.fill below
private void sampleTopicsForOneDocWithTheta(FeatureSequence tokenSequence,
		LabelSequence topicSequence, double[] topicDistribution) {
	// TODO Auto-generated method stub
	int[] oneDocTopics = topicSequence.getFeatures();

	TIntIntHashMap currentTypeTopicCounts;
	int type, oldTopic, newTopic;
	double tw;
	double[] topicWeights = new double[numTopics];
	double topicWeightsSum;
	int docLength = tokenSequence.getLength();
	
	// Iterate over the positions (words) in the document
	for (int si = 0; si < docLength; si++) {
		type = tokenSequence.getIndexAtPosition(si);
		oldTopic = oneDocTopics[si];
		if(oldTopic == -1) {
			continue;
		}

		currentTypeTopicCounts = typeTopicCounts[type];
		assert(currentTypeTopicCounts.get(oldTopic) >= 0);

		if (currentTypeTopicCounts.get(oldTopic) == 1) {
			currentTypeTopicCounts.remove(oldTopic);
		}
		else {
			currentTypeTopicCounts.adjustValue(oldTopic, -1);
		}
		tokensPerTopic[oldTopic]--;

		// Build a distribution over topics for this token
		Arrays.fill (topicWeights, 0.0);
		topicWeightsSum = 0;

		for (int ti = 0; ti < numTopics; ti++) {
			tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
			      * topicDistribution[ti]; // any constant normalizer is skipped: only relative weights matter for sampling
			topicWeightsSum += tw;
			topicWeights[ti] = tw;
		}
		// Sample a topic assignment from this distribution
		newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

		// Put that new topic into the counts
		oneDocTopics[si] = newTopic;
		currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
		tokensPerTopic[newTopic]++;
	}
}
 
Developer: iamxiatian, Project: wikit, Lines: 51, Source: LDAStream.java
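
Example 3 differs from the first two only on the document side: instead of local topic counts smoothed by alpha, the caller supplies a fixed distribution topicDistribution (a previously estimated theta), and no localTopicCounts array is maintained. Under the same reading as above, the conditional it samples from is

p(z_i = t \mid \text{rest}) \propto \frac{n_{w,t} + \beta}{n_t + \beta V}\,\theta_t

with \theta_t = topicDistribution[t].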


Note: the gnu.trove.map.hash.TIntIntHashMap.remove method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please follow the corresponding project's license when using or redistributing the code, and do not reproduce this article without permission.