

Java DiffFunction Class Code Examples

This article collects typical usage examples of the Java class edu.stanford.nlp.optimization.DiffFunction. If you are wondering what the DiffFunction class does, how to use it, or what real code that uses it looks like, the examples selected below should help.


DiffFunction belongs to the edu.stanford.nlp.optimization package. Five code examples for the class are shown below, ordered by popularity.
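
Before the examples, here is a minimal sketch of what implementing DiffFunction looks like: the interface requires valueAt (the objective), derivativeAt (its gradient), and domainDimension, and an implementation can be handed to a quasi-Newton optimizer such as QNMinimizer. The toy quadratic objective and the DiffFunctionDemo class name are illustrative; the QNMinimizer constructor and minimize signature are taken from Example 3 below, but verify them against the Stanford CoreNLP version you depend on.

import edu.stanford.nlp.optimization.DiffFunction;
import edu.stanford.nlp.optimization.QNMinimizer;

public class DiffFunctionDemo {
    public static void main(String[] args) {
        // f(x, y) = (x - 3)^2 + (y + 1)^2, whose minimum is at (3, -1)
        DiffFunction f = new DiffFunction() {
            @Override
            public double valueAt(double[] x) {
                return (x[0] - 3) * (x[0] - 3) + (x[1] + 1) * (x[1] + 1);
            }

            @Override
            public double[] derivativeAt(double[] x) {
                return new double[] { 2 * (x[0] - 3), 2 * (x[1] + 1) };
            }

            @Override
            public int domainDimension() {
                return 2;
            }
        };

        // 15 is the L-BFGS memory parameter, as in Example 3 below
        QNMinimizer qn = new QNMinimizer(15, true);
        double[] argmin = qn.minimize(f, 1e-6, new double[] { 0.0, 0.0 });
        System.out.printf("minimum found at (%.4f, %.4f)%n", argmin[0], argmin[1]);
    }
}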

Example 1: OptimizerState

import edu.stanford.nlp.optimization.DiffFunction; // import the required class
OptimizerState(DiffFunction f, double[] init, int m, double l1weight, boolean quiet) {
    // validate m before it is used to size the history arrays below; in the original,
    // new double[m] could fail first with a less helpful NegativeArraySizeException
    if (m <= 0)
        throw new RuntimeException("m must be an integer greater than zero.");

    this.x = init;
    this.grad = new double[init.length];
    this.newX = init.clone();
    this.newGrad = new double[init.length];
    this.dir = new double[init.length];
    this.steepestDescDir = newGrad.clone();
    this.alphas = new double[m];   // alpha coefficients for the L-BFGS two-loop recursion
    this.iter = 1;
    this.m = m;                    // L-BFGS memory parameter
    this.dim = init.length;
    this.func = f;
    this.l1weight = l1weight;
    this.quiet = quiet;

    value = evalL1();              // initial value of the L1-regularized objective
    grad = newGrad.clone();        // seed grad from the gradient evalL1() leaves in newGrad
}
 
Developer: weizh, Project: geolocator-3.0, Lines of code: 22, Source file: OWLQN.java

Example 2: OptimizerState (an identical copy of the Example 1 constructor, from a different project)

import edu.stanford.nlp.optimization.DiffFunction; // import the required class
OptimizerState(DiffFunction f, double[] init, int m, double l1weight, boolean quiet) {
    // as in Example 1, validate m before it is used to size the history arrays below
    if (m <= 0) {
        throw new RuntimeException("m must be an integer greater than zero.");
    }

    this.x = init;
    this.grad = new double[init.length];
    this.newX = init.clone();
    this.newGrad = new double[init.length];
    this.dir = new double[init.length];
    this.steepestDescDir = newGrad.clone();
    this.alphas = new double[m];
    this.iter = 1;
    this.m = m;
    this.dim = init.length;
    this.func = f;
    this.l1weight = l1weight;
    this.quiet = quiet;

    value = evalL1();
    grad = newGrad.clone();
}
 
Developer: vietansegan, Project: segan, Lines of code: 23, Source file: OWLQN.java

Example 3: optimize

import edu.stanford.nlp.optimization.DiffFunction; // import the required class
@SuppressWarnings("unchecked")
public Counter<String> optimize(Counter<String> initialWts) {
  Counter<String> wts = new ClassicCounter<String>(initialWts);

  EvaluationMetric<IString, String> modelMetric = new LinearCombinationMetric<IString, String>(
      new double[] { 1.0 },
      new ScorerWrapperEvaluationMetric<IString, String>(new DenseScorer(
          initialWts)));

  List<ScoredFeaturizedTranslation<IString, String>> current = (new HillClimbingMultiTranslationMetricMax<IString, String>(
      modelMetric)).maximize(nbest);

  List<ScoredFeaturizedTranslation<IString, String>> target = (new HillClimbingMultiTranslationMetricMax<IString, String>(
      emetric)).maximize(nbest);

  System.err.println("Target model: " + modelMetric.score(target)
      + " metric: " + emetric.score(target));
  System.err.println("Current model: " + modelMetric.score(current)
      + " metric: " + emetric.score(current));

  // create a mapping between weight names and optimization
  // weight vector positions
  String[] weightNames = new String[wts.size()];
  double[] initialWtsArr = new double[wts.size()];

  int nameIdx = 0;
  for (String feature : wts.keySet()) {
    initialWtsArr[nameIdx] = wts.getCount(feature);
    weightNames[nameIdx++] = feature;
  }

  double[][] lossMatrix = OptimizerUtils.calcDeltaMetric(nbest, target,
      emetric);

  Minimizer<DiffFunction> qn = new QNMinimizer(15, true);
  SoftMaxMarginMarkovNetwork sm3n = new SoftMaxMarginMarkovNetwork(
      weightNames, target, lossMatrix);
  double initialValueAt = sm3n.valueAt(initialWtsArr);
  // x != x is true only for NaN; Double.isNaN makes the intent explicit
  if (initialValueAt == Double.POSITIVE_INFINITY
      || Double.isNaN(initialValueAt)) {
    System.err
        .println("Initial objective is infinite/NaN - normalizing weight vector");
    double normTerm = Counters.L2Norm(wts);
    for (int i = 0; i < initialWtsArr.length; i++) {
      initialWtsArr[i] /= normTerm;
    }
  }
  double initialObjValue = sm3n.valueAt(initialWtsArr);
  double initialDNorm = OptimizerUtils.norm2DoubleArray(sm3n
      .derivativeAt(initialWtsArr));
  double initialXNorm = OptimizerUtils.norm2DoubleArray(initialWtsArr);

  System.err.println("Initial Objective value: " + initialObjValue);
  double[] newX = qn.minimize(sm3n, 1e-4, initialWtsArr);
  Counter<String> newWts = OptimizerUtils.getWeightCounterFromArray(
      weightNames, newX);
  double finalObjValue = sm3n.valueAt(newX);

  double objDiff = initialObjValue - finalObjValue;
  double finalDNorm = OptimizerUtils
      .norm2DoubleArray(sm3n.derivativeAt(newX));
  double finalXNorm = OptimizerUtils.norm2DoubleArray(newX);
  double metricEval = MERT.evalAtPoint(nbest, newWts, emetric);
  System.err.println(">>>[Converge Info] ObjInit(" + initialObjValue
      + ") - ObjFinal(" + finalObjValue + ") = ObjDiff(" + objDiff
      + ") L2DInit(" + initalDNorm + ") L2DFinal(" + finalDNorm
      + ") L2XInit(" + initalXNorm + ") L2XFinal(" + finalXNorm + ")");

  MERT.updateBest(newWts, metricEval, true);

  return newWts;
}
 
Developer: stanfordnlp, Project: phrasal, Lines of code: 74, Source file: SoftmaxMaxMarginMarkovNetwork.java

Example 4: minimize

import edu.stanford.nlp.optimization.DiffFunction; // import the required class
public double[] minimize(DiffFunction function, double[] initial, double l1weight, double tol, int m) {
    OptimizerState state = new OptimizerState(function, initial, m, l1weight, quiet);

    if (!quiet) {
        System.err.printf("Optimizing function of %d variables with OWL-QN parameters:\n", state.dim);
        System.err.printf("   l1 regularization weight: %f.\n", l1weight);
        System.err.printf("   L-BFGS memory parameter (m): %d\n", m);
        System.err.printf("   Convergence tolerance: %f\n\n", tol);
        System.err.printf("Iter    n:\tnew_value\tdf\t(conv_crit)\tline_search\n");
        System.err.printf("Iter    0:\t%.4e\t\t(***********)\t", state.value);
    }

    StringBuilder buf = new StringBuilder();
    termCrit.getValue(state, buf);

    for (int i = 0; i < maxIters; i++) {
        buf.setLength(0);
        state.updateDir();
        state.backTrackingLineSearch();

        double termCritVal = termCrit.getValue(state, buf);
        if (!quiet) {
            int numNonZero = ArrayMath.countNonZero(state.newX);
            System.err.printf("Iter %4d:\t%.4e\t%d", state.iter, state.value, numNonZero);
            System.err.print("\t" + buf.toString());

            if (printer != null)
                printer.printWeights();
        }

        // mheilman: I added this check because OWL-QN sometimes failed without it for
        // large L1 penalties and few features. It verifies that the parameters changed
        // in the last iteration. If they did not, the line search produced a newX equal
        // to x (or a newGrad equal to grad), so the differences stored in sList and
        // yList are zero, which in turn makes values in roList zero. Those ro values
        // appear in a denominator, so mapDirByInverseHessian() would divide by zero
        // when approximating the Hessian. This only seems to happen once the parameters
        // have already converged; numerical loss of precision is the likely cause.
        if (arrayEquals(state.x, state.newX)) {
            System.err.println("Warning: Stopping OWL-QN since there was no change in the parameters in the last iteration. This probably means convergence has been reached.");
            break;
        }

        if (termCritVal < tol)
            break;

        state.shift();
    }

    if (!quiet) {
        System.err.println();
        System.err.printf("Finished with optimization. %d/%d non-zero weights.\n",
                ArrayMath.countNonZero(state.newX), state.newX.length);
    }

    return state.newX;
}
 
Developer: weizh, Project: geolocator-3.0, Lines of code: 61, Source file: OWLQN.java

Example 5: minimize

import edu.stanford.nlp.optimization.DiffFunction; // import the required class
public double[] minimize(DiffFunction function, double[] initial) {
    // convenience overload; by analogy with the five-argument signature in Example 4,
    // the hard-coded 1.0 is presumably a default L1 regularization weight
    return minimize(function, initial, 1.0);
}
 
Developer: vietansegan, Project: segan, Lines of code: 4, Source file: OWLQN.java
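
To round out Examples 4 and 5, here is a hypothetical driver showing how the OWLQN class above might be invoked with an anonymous DiffFunction. The no-argument OWLQN constructor and the reading of the three-argument overload (its third argument taken to be the L1 weight, by analogy with Example 4's parameter order) are assumptions, not something the snippets above confirm.

import edu.stanford.nlp.optimization.DiffFunction;

public class OwlqnDemo {
    public static void main(String[] args) {
        // smooth part of the objective: f(x) = sum_i (x_i - 1)^2
        // (OWL-QN adds the L1 penalty itself, so the DiffFunction stays differentiable)
        final int dim = 10;
        DiffFunction smoothLoss = new DiffFunction() {
            @Override
            public double valueAt(double[] x) {
                double v = 0.0;
                for (double xi : x) {
                    v += (xi - 1.0) * (xi - 1.0);
                }
                return v;
            }

            @Override
            public double[] derivativeAt(double[] x) {
                double[] g = new double[x.length];
                for (int i = 0; i < x.length; i++) {
                    g[i] = 2.0 * (x[i] - 1.0);
                }
                return g;
            }

            @Override
            public int domainDimension() {
                return dim;
            }
        };

        OWLQN owlqn = new OWLQN();       // hypothetical no-argument constructor
        double[] init = new double[dim]; // start from the zero vector

        // Example 5's convenience overload (default L1 weight of 1.0):
        double[] solution = owlqn.minimize(smoothLoss, init);

        // or spell out the hyperparameters, matching Example 4's signature:
        // minimize(function, initial, l1weight, tol, m)
        double[] solution2 = owlqn.minimize(smoothLoss, init, 0.5, 1e-4, 10);
    }
}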


Note: The edu.stanford.nlp.optimization.DiffFunction class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their original authors, who retain copyright; consult the corresponding project's license before redistributing or using the code. Do not reproduce without permission.