

Java LogPrior Class Code Examples

This article collects typical usage examples of the Java class edu.stanford.nlp.classify.LogPrior. If you are wondering what the LogPrior class is for, how to use it, or where to find real-world examples, the selected code examples below should help.


The LogPrior class belongs to the edu.stanford.nlp.classify package. Six code examples of the class are shown below, sorted by popularity.
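
Before the examples, here is a minimal sketch of working with LogPrior directly. The no-argument constructor, setSigma, getType, and getSigma all appear in the examples that follow; the sigma value and the assumption that the default prior type is quadratic are illustrative, not taken from any of the projects below.

import edu.stanford.nlp.classify.LogPrior;

public class LogPriorSketch {
  public static void main(String[] args) {
    // Default construction; in the examples below this is paired with
    // setSigma() before handing the prior to a classifier factory.
    LogPrior prior = new LogPrior();
    prior.setSigma(0.5); // illustrative L2 regularization strength

    // Example 1 checks the prior type before reading sigma; the same
    // pattern is sketched here.
    if (prior.getType() == LogPrior.LogPriorType.QUADRATIC) {
      System.out.println("quadratic (Gaussian) prior, sigma = " + prior.getSigma());
    } else {
      System.out.println("prior type: " + prior.getType());
    }
  }
}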

Example 1: StochasticInPlaceMinimizer

import edu.stanford.nlp.classify.LogPrior; // import the required package/class
public StochasticInPlaceMinimizer(LogPrior prior, int numPasses, int batchSize, int tuningSamples)
{
  if (LogPrior.LogPriorType.QUADRATIC == prior.getType()) {
    sigma = prior.getSigma();
  } else {
    throw new RuntimeException("Unsupported prior type " + prior.getType());
  }
  if (numPasses >= 0) {
    this.numPasses = numPasses;
  } else {
    this.numPasses = DEFAULT_NUM_PASSES;
    sayln("  StochasticInPlaceMinimizer: numPasses=" + numPasses + ", defaulting to " + this.numPasses);
  }
  this.bSize = batchSize;
  if (tuningSamples > 0) {
    this.tuningSamples = tuningSamples;
  } else {
    this.tuningSamples = DEFAULT_TUNING_SAMPLES;
    sayln("  StochasticInPlaceMinimizer: tuneSampleSize=" + tuningSamples + ", defaulting to " + this.tuningSamples);
  }
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines of code: 22, Source: StochasticInPlaceMinimizer.java
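
As a usage note, the constructor shown in Example 1 could be driven roughly as follows. This is a sketch under assumptions: the import path of StochasticInPlaceMinimizer (edu.stanford.nlp.optimization in Stanford CoreNLP; the Stanford.NER.Net port above may place it elsewhere), the raw type, and the numeric arguments are illustrative and are not taken from the source project.

import edu.stanford.nlp.classify.LogPrior;
// Assumed package; in Stanford CoreNLP the class lives in the optimization
// package, but the port quoted above may differ.
import edu.stanford.nlp.optimization.StochasticInPlaceMinimizer;

public class MinimizerSetupSketch {
  public static void main(String[] args) {
    LogPrior prior = new LogPrior(); // must be quadratic, per the check in Example 1
    prior.setSigma(1.0);             // illustrative sigma

    // 50 passes, batch size 15, 1000 tuning samples: all illustrative values.
    // The raw type is used because the objective-function type parameter is
    // not shown in the snippet above.
    StochasticInPlaceMinimizer minimizer =
        new StochasticInPlaceMinimizer(prior, 50, 15, 1000);
    System.out.println("configured minimizer: " + minimizer);
  }
}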

Example 2: adaptMaxEnt

import edu.stanford.nlp.classify.LogPrior; // import the required package/class
private void adaptMaxEnt(Dataset<String, String> adapt) {
  if (classifier instanceof LinearClassifier) {
    // So far the adaptation is only done on Gaussian Prior. Haven't checked how it'll work on other kinds of priors. -pichuan
    int prior = LogPrior.LogPriorType.QUADRATIC.ordinal();
    if (flags.useHuber) {
      throw new UnsupportedOperationException();
    } else if (flags.useQuartic) {
      throw new UnsupportedOperationException();
    }

    LinearClassifierFactory<String, String> lcf = new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum, prior, flags.adaptSigma, flags.epsilon, flags.QNsize);
    ((LinearClassifier<String, String>)classifier).adaptWeights(adapt,lcf);
  } else {
    throw new UnsupportedOperationException();
  }
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines of code: 17, Source: CMMClassifier.java

Example 3: trainSemiSup

import edu.stanford.nlp.classify.LogPrior; // import the required package/class
private void trainSemiSup(Dataset<String, String> data, Dataset<String, String> biasedData, double[][] confusionMatrix) {
  int prior = LogPrior.LogPriorType.QUADRATIC.ordinal();
  if (flags.useHuber) {
    prior = LogPrior.LogPriorType.HUBER.ordinal();
  } else if (flags.useQuartic) {
    prior = LogPrior.LogPriorType.QUARTIC.ordinal();
  }

  LinearClassifierFactory<String, String> lcf;
  lcf = new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum, prior, flags.sigma, flags.epsilon, flags.QNsize);
  if (flags.useQN) {
    lcf.useQuasiNewton();
  } else{
    lcf.useConjugateGradientAscent();
  }

  this.classifier = (LinearClassifier<String, String>) lcf.trainClassifierSemiSup(data, biasedData, confusionMatrix, null);
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines of code: 19, Source: CMMClassifier.java

Example 4: optimize

import edu.stanford.nlp.classify.LogPrior; // import the required package/class
@Override
public Counter<String> optimize(Counter<String> initialWts) {
  Counter<String> wts = new ClassicCounter<String>(initialWts);
  Counters.normalize(wts);
  double seedSeed = Math.abs(Counters.max(wts));
  long seed = (long)Math.exp(Math.log(seedSeed) + Math.log(Long.MAX_VALUE));
  System.err.printf("PRO thread using random seed: %d\n", seed);
  RVFDataset<String, String> proSamples = getSamples(new Random(seed));
  LogPrior lprior = new LogPrior();
  lprior.setSigma(l2sigma);
  LogisticClassifierFactory<String,String> lcf = new LogisticClassifierFactory<String,String>();
  LogisticClassifier<String, String> lc = lcf.trainClassifier(proSamples, lprior, false);
  Counter<String> decoderWeights = new ClassicCounter<String>(); 
  Counter<String> lcWeights = lc.weightsAsCounter();
  for (String key : lcWeights.keySet()) {
    double mul;
    if (key.startsWith("1 / ")) {
      mul = 1.0;
    } else if (key.startsWith("0 / ")) {
      mul = -1.0;
    } else {
      throw new RuntimeException("Unparsable weight name produced by logistic classifier: "+key);
    }
    String decoderKey = key.replaceFirst("^[10] / ", "");
    decoderWeights.incrementCount(decoderKey, mul*lcWeights.getCount(key));
  }

  synchronized (MERT.bestWts) {
    if (!updatedBestOnce) {
      System.err.println("Force updating weights (once)");
      double metricEval = MERT.evalAtPoint(nbest, decoderWeights, emetric);
      MERT.updateBest(decoderWeights, metricEval, true);
      updatedBestOnce = true;
    }
  }
  return decoderWeights;
}
 
Developer: stanfordnlp, Project: phrasal, Lines of code: 38, Source: PairwiseRankingOptimizer.java

Example 5: trainMaxEnt

import edu.stanford.nlp.classify.LogPrior; // import the required package/class
private void trainMaxEnt(Dataset<String, String> train) {
  int prior = LogPrior.LogPriorType.QUADRATIC.ordinal();
  if (flags.useHuber) {
    prior = LogPrior.LogPriorType.HUBER.ordinal();
  } else if (flags.useQuartic) {
    prior = LogPrior.LogPriorType.QUARTIC.ordinal();
  }

  LinearClassifier<String, String> lc;
  if (flags.useNB) {
    lc = new NBLinearClassifierFactory<String, String>(flags.sigma).trainClassifier(train);
  } else {
    LinearClassifierFactory<String, String> lcf = new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum, prior, flags.sigma, flags.epsilon, flags.QNsize);
    if (flags.useQN) {
      lcf.useQuasiNewton(flags.useRobustQN);
    } else if(flags.useStochasticQN) {
      lcf.useStochasticQN(flags.initialGain,flags.stochasticBatchSize);
    } else if(flags.useSMD) {
      lcf.useStochasticMetaDescent(flags.initialGain, flags.stochasticBatchSize,flags.stochasticMethod,flags.SGDPasses);
    } else if(flags.useSGD) {
      lcf.useStochasticGradientDescent(flags.gainSGD,flags.stochasticBatchSize);
    } else if(flags.useSGDtoQN) {
      lcf.useStochasticGradientDescentToQuasiNewton(flags.initialGain, flags.stochasticBatchSize,
                                     flags.SGDPasses, flags.QNPasses, flags.SGD2QNhessSamples,
                                     flags.QNsize, flags.outputIterationsToFile);
    } else if(flags.useHybrid) {
      lcf.useHybridMinimizer(flags.initialGain, flags.stochasticBatchSize ,flags.stochasticMethod ,flags.hybridCutoffIteration );
    } else {
      lcf.useConjugateGradientAscent();
    }
    lc = lcf.trainClassifier(train);
  }
  this.classifier = lc;
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines of code: 35, Source: CMMClassifier.java

Example 6: retrain

import edu.stanford.nlp.classify.LogPrior; // import the required package/class
/**
 * @param featureLabels retrain docs
 * @param featureIndex featureIndex of original dataset (used in training)
 * @param labelIndex labelIndex of original dataset (used in training)
 */
public void retrain(ObjectBank<List<IN>> featureLabels, Index<String> featureIndex, Index<String> labelIndex) {
  int fs = featureIndex.size(); // old dim
  int ls = labelIndex.size();   // old dim

  Dataset<String, String> adapt = getDataset(featureLabels, featureIndex, labelIndex);

  int prior = LogPrior.LogPriorType.QUADRATIC.ordinal();
  LinearClassifier<String, String> lc = (LinearClassifier<String, String>) classifier;
  LinearClassifierFactory<String, String> lcf = new LinearClassifierFactory<String, String>(flags.tolerance, flags.useSum, prior, flags.sigma, flags.epsilon, flags.QNsize);

  double[][] weights = lc.weights();  // old dim
  Index<String> newF = adapt.featureIndex;
  Index<String> newL = adapt.labelIndex;
  int newFS = newF.size();
  int newLS = newL.size();
  double[] x = new double[newFS*newLS]; // new dim
  //System.err.println("old  ["+fs+"]"+"["+ls+"]");
  //System.err.println("new  ["+newFS+"]"+"["+newLS+"]");
  //System.err.println("new  ["+newFS*newLS+"]");
  for (int i = 0; i < fs; i++) {
    for (int j = 0; j < ls; j++) {
      String f = featureIndex.get(i);
      String l = labelIndex.get(j);
      int newi = newF.indexOf(f)*newLS+newL.indexOf(l);
      x[newi] = weights[i][j];
      //if (newi == 144745*2) {
      //System.err.println("What??"+i+"\t"+j);
      //}
    }
  }
  //System.err.println("x[144745*2]"+x[144745*2]);
  weights = lcf.trainWeights(adapt, x);
  //System.err.println("x[144745*2]"+x[144745*2]);
  //System.err.println("weights[144745]"+"[0]="+weights[144745][0]);

  lc.setWeights(weights);
  /*
  int delme = 0;
  if (true) {
    for (double[] dd : weights) {
      delme++;
      for (double d : dd) {
      }
    }
  }
  System.err.println(weights[delme-1][0]);
  System.err.println("size of weights: "+delme);
  */
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines of code: 55, Source: CMMClassifier.java


Note: The edu.stanford.nlp.classify.LogPrior class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and the source code copyright remains with those authors; please refer to each project's license before distributing or using the code. Do not reproduce this article without permission.