This article collects typical usage examples of the Java method edu.stanford.nlp.math.ArrayMath.argmax. If you are wondering how to use ArrayMath.argmax in Java, how it works, or what real example code looks like, the curated examples below may help. You can also explore further usage examples of its containing class, edu.stanford.nlp.math.ArrayMath.
Four code examples of the ArrayMath.argmax method are shown below, sorted by popularity by default.
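Before turning to the real-world examples, here is a minimal, self-contained sketch of what ArrayMath.argmax does: given a double[], it returns the index of the largest element, not the value itself. The class name ArgmaxDemo and the scores array are invented purely for illustration.

import edu.stanford.nlp.math.ArrayMath;

public class ArgmaxDemo {
  public static void main(String[] args) {
    // hypothetical unnormalized scores for four labels
    double[] scores = { 0.1, 2.5, -0.3, 1.7 };
    int best = ArrayMath.argmax(scores);   // index of the largest element, here 1
    System.out.println("best index = " + best + ", best score = " + scores[best]);
  }
}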
Example 1: greedyDecode
import edu.stanford.nlp.math.ArrayMath; // import the package/class the method depends on
/**
 * THIS CLOBBERS THE LABELS, stores its decoding into them.
 * Does progressive rolling edge feature extraction
 **/
public void greedyDecode(ModelSentence sentence, boolean storeConfidences) {
  int T = sentence.T;
  sentence.labels = new int[T];
  sentence.edgeFeatures[0] = startMarker();

  if (storeConfidences) sentence.confidences = new double[T];

  double[] labelScores = new double[numLabels];
  for (int t = 0; t < T; t++) {
    computeLabelScores(t, sentence, labelScores);
    sentence.labels[t] = ArrayMath.argmax(labelScores);
    if (t < T - 1)
      sentence.edgeFeatures[t + 1] = sentence.labels[t];
    if (storeConfidences) {
      ArrayMath.expInPlace(labelScores);
      double Z = ArrayMath.sum(labelScores);
      ArrayMath.multiplyInPlace(labelScores, 1.0 / Z);
      sentence.confidences[t] = labelScores[sentence.labels[t]];
    }
  }
}
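The confidence computed above is simply a softmax over the raw label scores, evaluated at the label that argmax already picked. A standalone sketch of just that step, assuming a hypothetical three-label score vector (ConfidenceDemo and the values are invented for illustration):

import edu.stanford.nlp.math.ArrayMath;

public class ConfidenceDemo {
  public static void main(String[] args) {
    double[] labelScores = { 1.2, 0.3, -0.5 };        // hypothetical unnormalized label scores
    int best = ArrayMath.argmax(labelScores);         // chosen label, before normalization
    ArrayMath.expInPlace(labelScores);                // exponentiate each score in place
    double Z = ArrayMath.sum(labelScores);            // normalizing constant
    ArrayMath.multiplyInPlace(labelScores, 1.0 / Z);  // now a proper probability distribution
    System.out.println("confidence = " + labelScores[best]);
  }
}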
Example 2: getBestTag
import edu.stanford.nlp.math.ArrayMath; // import the package/class the method depends on
/**
 * Determine best tag based on current word and its immediate predecessors.
 *
 * @param s
 *          <i>leftWindow</i> plus one words
 * @param o
 *          Offset with respect to last position.
 * @return Best tag and its probability.
 */
public Pair<IString, Float> getBestTag(IString[] s, int o) {
  int loc = s.length - 1 + o;

  IStringArrayWrapper aw = null;
  Pair<IString, Float> tag;

  if (CACHE_POS) {
    aw = new IStringArrayWrapper(s);
    tag = cache.get(aw);
    if (tag != null)
      return tag;
  }

  init(s);
  int[] bestTags = new int[len];
  int[][] vals = new int[len][];

  for (int pos = 0; pos < len; pos++) {
    vals[pos] = getPossibleValues(pos);
    bestTags[pos] = vals[pos][0];
  }

  this.initializeScorer();
  double[] scores = scoresOf(bestTags, loc);
  int am = ArrayMath.argmax(scores);
  // TODO
  bestTags[loc] = vals[loc][am];
  cleanUpScorer();

  tag = new Pair<IString, Float>(new IString(maxentTagger.getTag(bestTags[loc])),
      (float) scores[am]);
  if (CACHE_POS)
    cache.put(aw, tag);
  return tag;
}
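Note how argmax is used here: scoresOf(bestTags, loc) returns one score per candidate tag at position loc, ArrayMath.argmax picks the index of the best-scoring candidate, and vals[loc][am] maps that index back to the actual tag id before it is converted to a tag string with maxentTagger.getTag.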
Example 3: samplePositionHelper
import edu.stanford.nlp.math.ArrayMath; // import the package/class the method depends on
/**
 * Samples a single position in the sequence.
 * Does not modify the sequence passed in.
 * Returns the sampled label for the position together with its probability.
 *
 * @param sequence the sequence to start with
 * @param pos the position to sample
 * @param temperature the temperature to control annealing
 */
private Pair<Integer, Double> samplePositionHelper(SequenceModel model, int[] sequence, int pos, double temperature) {
  double[] distribution = model.scoresOf(sequence, pos);
  if (temperature != 1.0) {
    if (temperature == 0.0) {
      // set the max to 1.0
      int argmax = ArrayMath.argmax(distribution);
      Arrays.fill(distribution, Double.NEGATIVE_INFINITY);
      distribution[argmax] = 0.0;
    } else {
      // take all to a power
      // use the temperature to increase/decrease the entropy of the sampling distribution
      ArrayMath.multiplyInPlace(distribution, 1.0 / temperature);
    }
  }
  ArrayMath.logNormalize(distribution);
  ArrayMath.expInPlace(distribution);
  if (BisequenceEmpiricalNERPrior.DEBUG) {
    if (BisequenceEmpiricalNERPrior.debugIndices.indexOf(pos) != -1) {
      System.err.println("final model:");
      for (int j = 0; j < distribution.length; j++)
        System.err.println("\t" + distribution[j]);
      System.err.println();
    }
  }
  int newTag = ArrayMath.sampleFromDistribution(distribution, random);
  double newProb = distribution[newTag];
  return new Pair<Integer, Double>(newTag, newProb);
}
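The temperature handling above is the usual annealing trick on log scores: dividing by a temperature below 1.0 sharpens the distribution, a temperature above 1.0 flattens it, and a temperature of exactly 0.0 degenerates to a deterministic argmax. A small standalone sketch of the same idea, with invented log scores and a fixed Random seed (TemperatureDemo is not part of the original code):

import java.util.Arrays;
import java.util.Random;
import edu.stanford.nlp.math.ArrayMath;

public class TemperatureDemo {
  public static void main(String[] args) {
    double[] logScores = { -0.1, -1.3, -2.0 };   // hypothetical log scores for three labels
    double temperature = 0.5;                    // < 1.0 sharpens, > 1.0 flattens

    if (temperature == 0.0) {
      // all probability mass on the argmax; nothing else can be sampled
      int argmax = ArrayMath.argmax(logScores);
      Arrays.fill(logScores, Double.NEGATIVE_INFINITY);
      logScores[argmax] = 0.0;
    } else if (temperature != 1.0) {
      // rescale log scores to raise or lower the entropy of the distribution
      ArrayMath.multiplyInPlace(logScores, 1.0 / temperature);
    }

    ArrayMath.logNormalize(logScores);           // normalize in log space
    ArrayMath.expInPlace(logScores);             // convert to probabilities
    int sampled = ArrayMath.sampleFromDistribution(logScores, new Random(0));
    System.out.println("sampled label index: " + sampled);
  }
}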
Example 4: mbrDecode
import edu.stanford.nlp.math.ArrayMath; // import the package/class the method depends on
public void mbrDecode(ModelSentence sentence) {
  double[][] posterior = inferPosteriorForUnknownLabels(sentence);
  for (int t = 0; t < sentence.T; t++) {
    sentence.labels[t] = ArrayMath.argmax(posterior[t]);
  }
}
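Unlike the greedy left-to-right decode in Example 1, this minimum Bayes risk (MBR) decode takes the argmax of the posterior marginal at each position independently, which optimizes expected per-token accuracy rather than the score of a single best label sequence.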