本文整理汇总了Java中edu.stanford.nlp.classify.LogPrior类的典型用法代码示例。如果您正苦于以下问题:Java LogPrior类的具体用法?Java LogPrior怎么用?Java LogPrior使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
LogPrior类属于edu.stanford.nlp.classify包,在下文中一共展示了LogPrior类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: StochasticInPlaceMinimizer
import edu.stanford.nlp.classify.LogPrior; //导入依赖的package包/类
/**
 * Builds a stochastic in-place minimizer configured from a quadratic (Gaussian) prior.
 * Negative {@code numPasses} and non-positive {@code tuningSamples} fall back to defaults,
 * with a log line noting the substitution.
 *
 * @param prior         regularization prior; only {@code QUADRATIC} is supported
 * @param numPasses     number of passes over the data; negative selects the default
 * @param batchSize     minibatch size for each stochastic update
 * @param tuningSamples number of samples used for gain tuning; non-positive selects the default
 * @throws RuntimeException if the prior is not quadratic
 */
public StochasticInPlaceMinimizer(LogPrior prior, int numPasses, int batchSize, int tuningSamples)
{
  // Guard first: this minimizer only knows how to regularize with a Gaussian prior.
  if (prior.getType() != LogPrior.LogPriorType.QUADRATIC) {
    throw new RuntimeException("Unsupported prior type " + prior.getType());
  }
  sigma = prior.getSigma();

  this.numPasses = (numPasses >= 0) ? numPasses : DEFAULT_NUM_PASSES;
  if (numPasses < 0) {
    sayln(" StochasticInPlaceMinimizer: numPasses=" + numPasses + ", defaulting to " + this.numPasses);
  }

  this.bSize = batchSize;

  this.tuningSamples = (tuningSamples > 0) ? tuningSamples : DEFAULT_TUNING_SAMPLES;
  if (tuningSamples <= 0) {
    sayln(" StochasticInPlaceMinimizer: tuneSampleSize=" + tuningSamples + ", defaulting to " + this.tuningSamples);
  }
}
示例2: adaptMaxEnt
import edu.stanford.nlp.classify.LogPrior; //导入依赖的package包/类
/**
 * Adapts the weights of the current linear classifier to a new dataset using
 * a quadratic (Gaussian) prior with {@code flags.adaptSigma}.
 *
 * @param adapt the dataset to adapt the existing classifier weights to
 * @throws UnsupportedOperationException if the classifier is not a LinearClassifier,
 *         or if a Huber or quartic prior is requested (only Gaussian is supported)
 */
private void adaptMaxEnt(Dataset<String, String> adapt) {
  // So far the adaptation is only done on Gaussian Prior. Haven't checked how
  // it'll work on other kinds of priors. -pichuan
  if (!(classifier instanceof LinearClassifier)) {
    throw new UnsupportedOperationException();
  }
  if (flags.useHuber || flags.useQuartic) {
    throw new UnsupportedOperationException();
  }
  int prior = LogPrior.LogPriorType.QUADRATIC.ordinal();
  LinearClassifierFactory<String, String> factory =
      new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum, prior,
          flags.adaptSigma, flags.epsilon, flags.QNsize);
  ((LinearClassifier<String, String>) classifier).adaptWeights(adapt, factory);
}
示例3: trainSemiSup
import edu.stanford.nlp.classify.LogPrior; //导入依赖的package包/类
/**
 * Trains a semi-supervised linear classifier from labeled data plus biased data
 * with a label confusion matrix, storing the result in {@code this.classifier}.
 *
 * @param data            the fully labeled training dataset
 * @param biasedData      the weakly/biased-labeled dataset
 * @param confusionMatrix label confusion probabilities for the biased data
 */
private void trainSemiSup(Dataset<String, String> data, Dataset<String, String> biasedData, double[][] confusionMatrix) {
  // Pick the regularization prior from the flags; quadratic (Gaussian) is the default.
  final LogPrior.LogPriorType priorType;
  if (flags.useHuber) {
    priorType = LogPrior.LogPriorType.HUBER;
  } else if (flags.useQuartic) {
    priorType = LogPrior.LogPriorType.QUARTIC;
  } else {
    priorType = LogPrior.LogPriorType.QUADRATIC;
  }

  LinearClassifierFactory<String, String> factory =
      new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum,
          priorType.ordinal(), flags.sigma, flags.epsilon, flags.QNsize);

  // Optimizer choice: quasi-Newton when requested, otherwise conjugate gradient.
  if (flags.useQN) {
    factory.useQuasiNewton();
  } else {
    factory.useConjugateGradientAscent();
  }

  this.classifier = (LinearClassifier<String, String>)
      factory.trainClassifierSemiSup(data, biasedData, confusionMatrix, null);
}
示例4: optimize
import edu.stanford.nlp.classify.LogPrior; //导入依赖的package包/类
@Override
public Counter<String> optimize(Counter<String> initialWts) {
  // Work on a normalized copy so the caller's weights are untouched.
  Counter<String> wts = new ClassicCounter<String>(initialWts);
  Counters.normalize(wts);

  // Derive a per-thread random seed from the largest weight magnitude
  // (deterministic for identical starting weights).
  double seedSeed = Math.abs(Counters.max(wts));
  long seed = (long) Math.exp(Math.log(seedSeed) + Math.log(Long.MAX_VALUE));
  System.err.printf("PRO thread using random seed: %d\n", seed);

  // Sample pairwise comparisons and fit an L2-regularized logistic classifier.
  RVFDataset<String, String> proSamples = getSamples(new Random(seed));
  LogPrior lprior = new LogPrior();
  lprior.setSigma(l2sigma);
  LogisticClassifierFactory<String, String> factory = new LogisticClassifierFactory<String, String>();
  LogisticClassifier<String, String> logistic = factory.trainClassifier(proSamples, lprior, false);

  // Map classifier features back to decoder weights: names look like
  // "1 / <feature>" (positive class) or "0 / <feature>" (negative class).
  Counter<String> decoderWeights = new ClassicCounter<String>();
  Counter<String> learned = logistic.weightsAsCounter();
  for (String key : learned.keySet()) {
    boolean positive = key.startsWith("1 / ");
    if (!positive && !key.startsWith("0 / ")) {
      throw new RuntimeException("Unparsable weight name produced by logistic classifier: " + key);
    }
    double mul = positive ? 1.0 : -1.0;
    String decoderKey = key.replaceFirst("^[10] / ", "");
    decoderWeights.incrementCount(decoderKey, mul * learned.getCount(key));
  }

  // Ensure the shared best-weights record is updated at least once across threads.
  synchronized (MERT.bestWts) {
    if (!updatedBestOnce) {
      System.err.println("Force updating weights (once)");
      double metricEval = MERT.evalAtPoint(nbest, decoderWeights, emetric);
      MERT.updateBest(decoderWeights, metricEval, true);
      updatedBestOnce = true;
    }
  }
  return decoderWeights;
}
示例5: trainMaxEnt
import edu.stanford.nlp.classify.LogPrior; //导入依赖的package包/类
/**
 * Trains a maximum-entropy (or naive Bayes) linear classifier on the given dataset
 * according to the configuration flags, storing the result in {@code this.classifier}.
 *
 * @param train the labeled training dataset
 */
private void trainMaxEnt(Dataset<String, String> train) {
  // Choose the regularization prior; quadratic (Gaussian) unless a flag overrides it.
  int prior = LogPrior.LogPriorType.QUADRATIC.ordinal();
  if (flags.useHuber) {
    prior = LogPrior.LogPriorType.HUBER.ordinal();
  } else if (flags.useQuartic) {
    prior = LogPrior.LogPriorType.QUARTIC.ordinal();
  }

  final LinearClassifier<String, String> trained;
  if (flags.useNB) {
    // Naive Bayes shortcut: no numerical optimizer needed.
    trained = new NBLinearClassifierFactory<String, String>(flags.sigma).trainClassifier(train);
  } else {
    LinearClassifierFactory<String, String> factory =
        new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum, prior,
            flags.sigma, flags.epsilon, flags.QNsize);
    // Select the optimizer; flag precedence mirrors the order checked below.
    if (flags.useQN) {
      factory.useQuasiNewton(flags.useRobustQN);
    } else if (flags.useStochasticQN) {
      factory.useStochasticQN(flags.initialGain, flags.stochasticBatchSize);
    } else if (flags.useSMD) {
      factory.useStochasticMetaDescent(flags.initialGain, flags.stochasticBatchSize,
          flags.stochasticMethod, flags.SGDPasses);
    } else if (flags.useSGD) {
      factory.useStochasticGradientDescent(flags.gainSGD, flags.stochasticBatchSize);
    } else if (flags.useSGDtoQN) {
      factory.useStochasticGradientDescentToQuasiNewton(flags.initialGain, flags.stochasticBatchSize,
          flags.SGDPasses, flags.QNPasses, flags.SGD2QNhessSamples,
          flags.QNsize, flags.outputIterationsToFile);
    } else if (flags.useHybrid) {
      factory.useHybridMinimizer(flags.initialGain, flags.stochasticBatchSize,
          flags.stochasticMethod, flags.hybridCutoffIteration);
    } else {
      factory.useConjugateGradientAscent();
    }
    trained = factory.trainClassifier(train);
  }
  this.classifier = trained;
}
示例6: retrain
import edu.stanford.nlp.classify.LogPrior; //导入依赖的package包/类
/**
 * Retrains the underlying linear classifier on new data, seeding the optimizer with
 * the previously learned weights mapped into the new feature/label index space.
 *
 * @param featureLabels retrain docs
 * @param featureIndex featureIndex of original dataset (used in training)
 * @param labelIndex labelIndex of original dataset (used in training)
 */
public void retrain(ObjectBank<List<IN>> featureLabels, Index<String> featureIndex, Index<String> labelIndex) {
  // Snapshot the old dimensions before building the dataset; getDataset may
  // grow the indices with features/labels seen only in the new docs (assumed — TODO confirm).
  int oldFeatureCount = featureIndex.size();
  int oldLabelCount = labelIndex.size();

  Dataset<String, String> adapt = getDataset(featureLabels, featureIndex, labelIndex);

  int prior = LogPrior.LogPriorType.QUADRATIC.ordinal();
  LinearClassifier<String, String> lc = (LinearClassifier<String, String>) classifier;
  LinearClassifierFactory<String, String> lcf =
      new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum, prior,
          flags.sigma, flags.epsilon, flags.QNsize);

  double[][] oldWeights = lc.weights(); // indexed by the old feature/label ids
  Index<String> newFeatureIndex = adapt.featureIndex;
  Index<String> newLabelIndex = adapt.labelIndex;
  int newLabelCount = newLabelIndex.size();

  // Flatten the old weight matrix into the new (feature * |labels| + label)
  // coordinate space so the optimizer starts from the previous solution.
  double[] initial = new double[newFeatureIndex.size() * newLabelCount];
  for (int i = 0; i < oldFeatureCount; i++) {
    String f = featureIndex.get(i);
    int base = newFeatureIndex.indexOf(f) * newLabelCount;
    for (int j = 0; j < oldLabelCount; j++) {
      String l = labelIndex.get(j);
      initial[base + newLabelIndex.indexOf(l)] = oldWeights[i][j];
    }
  }

  lc.setWeights(lcf.trainWeights(adapt, initial));
}