This article collects typical usage examples of the Java method edu.stanford.nlp.math.ArrayMath.norm. If you have been wondering what ArrayMath.norm does, how to call it, or where to find examples, the curated code samples below should help; they also give a feel for the containing class, edu.stanford.nlp.math.ArrayMath.
Three code examples of ArrayMath.norm are shown below, ordered by popularity by default.
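Before the examples, here is a minimal, self-contained sketch of calling ArrayMath.norm on its own (the NormDemo class name and the test vector are made up for illustration); it relies on norm returning the Euclidean (L2) norm of a double array:

import edu.stanford.nlp.math.ArrayMath;

public class NormDemo {
  public static void main(String[] args) {
    double[] v = {3.0, 4.0};
    double n = ArrayMath.norm(v);   // Euclidean norm: sqrt(3^2 + 4^2) = 5.0
    System.out.println(n);
  }
}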
Example 1: start
import edu.stanford.nlp.math.ArrayMath; // import the class the method depends on
public void start(double val, double[] grad, double[] x) {
  startTime = System.currentTimeMillis();   // record when optimization started
  gNormInit = ArrayMath.norm(grad);          // norm of the gradient at the starting point
  xLast = x;
  writeToFile(1, val, gNormInit, 0.0);
  if (x != null) {
    monitorX(x);
  }
}
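Fields such as startTime, gNormInit, and xLast, and helpers such as writeToFile and monitorX, belong to the surrounding monitor class and are not shown here. One common use of a stored initial gradient norm is a relative stopping test; a hypothetical check in that spirit (not part of the original class) could be:

// Hypothetical relative-tolerance test: stop once the current gradient norm
// falls below relTol times the initial gradient norm recorded in start().
static boolean smallGradient(double[] grad, double gNormInit, double relTol) {
  return ArrayMath.norm(grad) <= relTol * gNormInit;
}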
Example 2: add
import edu.stanford.nlp.math.ArrayMath; // import the class the method depends on
public void add(double val, double[] grad, double[] x, int fevals, double evalScore) {
  if (!memoryConscious) {
    // keep bounded histories of gradient norms, elapsed times, and evaluation counts
    if (gNorms.size() > maxSize) {
      gNorms.remove(0);
    }
    if (time.size() > maxSize) {
      time.remove(0);
    }
    if (funcEvals.size() > maxSize) {
      funcEvals.remove(0);
    }
    gNorms.add(gNormLast);
    time.add(howLong());
    funcEvals.add(fevals);
  } else {
    maxSize = 10;
  }

  gNormLast = ArrayMath.norm(grad);   // norm of the current gradient
  if (values.size() > maxSize) {
    values.remove(0);
  }
  values.add(val);
  if (evalScore != Double.NEGATIVE_INFINITY) {
    evals.add(evalScore);
  }
  writeToFile(fevals, val, gNormLast, howLong());
  say(nf.format(val) + " " + nfsec.format(howLong()) + "s");
  xLast = x;
  monitorX(x);
}
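The histories above are kept bounded: before appending, the oldest entry is removed once a list exceeds maxSize. That idiom, isolated with hypothetical names, is simply:

import java.util.List;

// Append a value, dropping the oldest entry once the history exceeds its cap.
static void addBounded(List<Double> history, double value, int cap) {
  if (history.size() > cap) {
    history.remove(0);   // discard the oldest entry
  }
  history.add(value);
}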
Example 3: getVariance
import edu.stanford.nlp.math.ArrayMath; // import the class the method depends on
public double[] getVariance(double[] x, int batchSize) {
  double[] ret = new double[4];
  double[] fullHx = new double[thisFunc.domainDimension()];
  double[] thisHx = new double[x.length];
  double[] thisGrad = new double[x.length];
  List<double[]> HxList = new ArrayList<double[]>();

  /*
  PrintWriter file = null;
  NumberFormat nf = new DecimalFormat("0.000E0");
  try {
    file = new PrintWriter(new FileOutputStream("var.out"), true);
  } catch (IOException e) {
    System.err.println("Caught IOException outputing List to file: " + e.getMessage());
    System.exit(1);
  }
  */

  // Compute the full Hessian-vector product over the entire data set.
  thisFunc.sampleMethod = AbstractStochasticCachingDiffFunction.SamplingMethod.Ordered;
  System.arraycopy(thisFunc.derivativeAt(x, x, thisFunc.dataDimension()), 0, thisGrad, 0, thisGrad.length);
  System.arraycopy(thisFunc.HdotVAt(x, x, thisGrad, thisFunc.dataDimension()), 0, fullHx, 0, fullHx.length);
  double fullNorm = ArrayMath.norm(fullHx);
  double hessScale = ((double) thisFunc.dataDimension()) / ((double) batchSize);

  // Compare stochastic (mini-batch) Hessian-vector products against the full one.
  thisFunc.sampleMethod = AbstractStochasticCachingDiffFunction.SamplingMethod.RandomWithReplacement;
  int n = 100;
  double simDelta;
  double ratDelta;
  double simMean = 0;
  double ratMean = 0;
  double simS = 0;
  double ratS = 0;
  int k = 0;

  System.err.println(fullHx[4] + " " + x[4]);

  for (int i = 0; i < n; i++) {
    System.arraycopy(thisFunc.derivativeAt(x, x, batchSize), 0, thisGrad, 0, thisGrad.length);
    System.arraycopy(thisFunc.HdotVAt(x, x, thisGrad, batchSize), 0, thisHx, 0, thisHx.length);
    ArrayMath.multiplyInPlace(thisHx, hessScale);
    double thisNorm = ArrayMath.norm(thisHx);
    double sim = ArrayMath.innerProduct(thisHx, fullHx) / (thisNorm * fullNorm);   // cosine similarity
    double rat = thisNorm / fullNorm;                                              // norm ratio
    // Welford's online update of the running mean and variance of sim and rat.
    k += 1;
    simDelta = sim - simMean;
    simMean += simDelta / k;
    simS += simDelta * (sim - simMean);
    ratDelta = rat - ratMean;
    ratMean += ratDelta / k;
    ratS += ratDelta * (rat - ratMean);
    //file.println( nf.format(sim) + " , " + nf.format(rat));
  }

  double simVar = simS / (k - 1);
  double ratVar = ratS / (k - 1);
  //file.close();

  ret[0] = simMean;
  ret[1] = simVar;
  ret[2] = ratMean;
  ret[3] = ratVar;
  return ret;
}
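The loop above accumulates the mean and variance of the cosine similarity (sim) and the norm ratio (rat) with Welford's online update, which avoids storing all n samples. Isolated, with hypothetical names, the update for a single stream of values is:

// Welford's online algorithm: returns {mean, sample variance} of the values.
static double[] meanAndVariance(double[] samples) {
  double mean = 0.0;
  double s = 0.0;                      // running sum of squared deviations
  int k = 0;
  for (double v : samples) {
    k += 1;
    double delta = v - mean;
    mean += delta / k;                 // update running mean
    s += delta * (v - mean);           // update running sum of squared deviations
  }
  return new double[] { mean, s / (k - 1) };   // sample variance, as in simS/(k-1) above
}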