This article collects typical usage examples of the Java method edu.stanford.nlp.math.ArrayMath.innerProduct. If you are wondering what exactly ArrayMath.innerProduct does, how to use it, or where to find examples of it in practice, the curated code examples below may help. You can also learn more about its enclosing class, edu.stanford.nlp.math.ArrayMath.

The examples below show 12 uses of ArrayMath.innerProduct, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
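Before the examples, a minimal usage sketch may help: ArrayMath.innerProduct takes two double[] of equal length and returns their dot product. The values below are illustrative only, not taken from any of the examples.

import edu.stanford.nlp.math.ArrayMath;

public class InnerProductDemo {
  public static void main(String[] args) {
    double[] a = {1.0, 2.0, 3.0};
    double[] b = {4.0, 5.0, 6.0};
    // 1*4 + 2*5 + 3*6 = 32.0
    System.out.println(ArrayMath.innerProduct(a, b));
  }
}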
Example 1: mapDirByInverseHessian

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

void mapDirByInverseHessian() {
  int count = sList.size();
  if (count != 0) {
    for (int i = count - 1; i >= 0; i--) {
      // mheilman: The program will try to divide by zero here unless there is a check
      // that the parameters change at each iteration. See comments in the minimize() method.
      // A roList value is the inner product of the change in the gradient
      // and the change in parameters between the current and last iterations.
      // See the discussion of L-BFGS in Nocedal and Wright's Numerical Optimization book
      // (though I think that defines rho as the multiplicative inverse of what is here).
      alphas[i] = -ArrayMath.innerProduct(sList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, yList.get(i), alphas[i]);
    }
    double[] lastY = yList.get(count - 1);
    double yDotY = ArrayMath.innerProduct(lastY, lastY);
    double scalar = roList.get(count - 1) / yDotY;
    ArrayMath.multiplyInPlace(dir, scalar);
    for (int i = 0; i < count; i++) {
      double beta = ArrayMath.innerProduct(yList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, sList.get(i), -alphas[i] - beta);
    }
  }
}
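Examples 1, 3, 5, and 6 are all variants of the L-BFGS two-loop recursion (Nocedal and Wright, Numerical Optimization, Algorithm 7.4). The self-contained sketch below shows the textbook form; the method and helper names here are mine, and rho is 1 / (y.s), i.e. the multiplicative inverse of the roList values stored in the snippet above.

import java.util.List; // needed by the sketch below

static double[] twoLoopRecursion(List<double[]> sList, List<double[]> yList, double[] grad) {
  int m = sList.size();
  double[] q = grad.clone();
  double[] alpha = new double[m];
  // first loop: walk the history backwards, projecting out each (s, y) pair
  for (int i = m - 1; i >= 0; i--) {
    double rho = 1.0 / dot(yList.get(i), sList.get(i));
    alpha[i] = rho * dot(sList.get(i), q);
    for (int j = 0; j < q.length; j++) {
      q[j] -= alpha[i] * yList.get(i)[j];
    }
  }
  // initial Hessian scaling gamma = (s.y) / (y.y), as in examples 1 and 5
  if (m > 0) {
    double[] s = sList.get(m - 1);
    double[] y = yList.get(m - 1);
    double gamma = dot(s, y) / dot(y, y);
    for (int j = 0; j < q.length; j++) {
      q[j] *= gamma;
    }
  }
  // second loop: walk forwards, adding the corrections back in
  for (int i = 0; i < m; i++) {
    double rho = 1.0 / dot(yList.get(i), sList.get(i));
    double beta = rho * dot(yList.get(i), q);
    for (int j = 0; j < q.length; j++) {
      q[j] += (alpha[i] - beta) * sList.get(i)[j];
    }
  }
  return q; // approximates H^{-1} grad; negate it to get a descent direction
}

static double dot(double[] a, double[] b) {
  double sum = 0.0;
  for (int i = 0; i < a.length; i++) {
    sum += a[i] * b[i];
  }
  return sum;
}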
Example 2: dirDeriv

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

double dirDeriv() {
  if (l1weight == 0) {
    return ArrayMath.innerProduct(dir, grad);
  } else {
    double val = 0.0;
    for (int i = 0; i < dim; i++) {
      // mheilman: I added this if-statement to avoid penalizing bias parameters.
      if (OWLQN.biasParameters.contains(i)) {
        val += dir[i] * grad[i];
        continue;
      }
      if (dir[i] != 0) {
        if (x[i] < 0) {
          val += dir[i] * (grad[i] - l1weight);
        } else if (x[i] > 0) {
          val += dir[i] * (grad[i] + l1weight);
        } else if (dir[i] < 0) {
          val += dir[i] * (grad[i] - l1weight);
        } else if (dir[i] > 0) {
          val += dir[i] * (grad[i] + l1weight);
        }
      }
    }
    return val;
  }
}
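The sign logic above is the directional derivative of the L1-regularized objective: the penalty l1weight * |x[i]| contributes slope +l1weight along dir[i] on the positive side of zero and -l1weight on the negative side, and at the kink x[i] == 0 the one-sided derivative in the direction of movement applies. A compact restatement of the per-coordinate term (a hedged sketch; the helper name is mine):

double directionalDerivTerm(double x, double dir, double grad, double l1weight) {
  // sign of the L1 term: sign(x), or sign(dir) at the kink x == 0;
  // when dir == 0 the whole term is 0, matching the if (dir[i] != 0) guard above
  double sign = (x != 0) ? Math.signum(x) : Math.signum(dir);
  return dir * (grad + sign * l1weight);
}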
Example 3: computeDir

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

private void computeDir(double[] dir, double[] fg, double[] x, QNInfo qn, Function func)
    throws SurpriseConvergence {
  System.arraycopy(fg, 0, dir, 0, fg.length);
  int mmm = qn.size();
  double[] as = new double[mmm];
  for (int i = mmm - 1; i >= 0; i--) {
    as[i] = qn.getRho(i) * ArrayMath.innerProduct(qn.getS(i), dir);
    plusAndConstMult(dir, qn.getY(i), -as[i], dir);
  }
  // multiply by hessian approximation
  qn.applyInitialHessian(dir);
  for (int i = 0; i < mmm; i++) {
    double b = qn.getRho(i) * ArrayMath.innerProduct(qn.getY(i), dir);
    plusAndConstMult(dir, qn.getS(i), as[i] - b, dir);
  }
  ArrayMath.multiplyInPlace(dir, -1);
  if (useOWLQN) { // step (2) in Galen & Gao 2007
    constrainSearchDir(dir, fg, x, func);
  }
}
Example 4: dirDeriv

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

double dirDeriv() {
  if (l1weight == 0) {
    return ArrayMath.innerProduct(dir, grad);
  } else {
    double val = 0.0;
    for (int i = 0; i < dim; i++) {
      if (OWLQN.biasParameters.contains(i)) {
        val += dir[i] * grad[i];
        continue;
      }
      if (dir[i] != 0) {
        if (x[i] < 0) {
          val += dir[i] * (grad[i] - l1weight);
        } else if (x[i] > 0) {
          val += dir[i] * (grad[i] + l1weight);
        } else if (dir[i] < 0) {
          val += dir[i] * (grad[i] - l1weight);
        } else if (dir[i] > 0) {
          val += dir[i] * (grad[i] + l1weight);
        }
      }
    }
    return val;
  }
}
Example 5: computeDir

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

private void computeDir(double[] dir, double[] fg) throws SQNMinimizer.SurpriseConvergence {
  System.arraycopy(fg, 0, dir, 0, fg.length);
  int mmm = sList.size();
  double[] as = new double[mmm];
  double[] factors = new double[dir.length];
  for (int i = mmm - 1; i >= 0; i--) {
    as[i] = roList.get(i) * ArrayMath.innerProduct(sList.get(i), dir);
    plusAndConstMult(dir, yList.get(i), -as[i], dir);
  }
  // multiply by hessian approximation
  if (mmm != 0) {
    double[] y = yList.get(mmm - 1);
    double yDotY = ArrayMath.innerProduct(y, y);
    if (yDotY == 0) {
      throw new SQNMinimizer.SurpriseConvergence("Y is 0!!");
    }
    double gamma = ArrayMath.innerProduct(sList.get(mmm - 1), y) / yDotY;
    ArrayMath.multiplyInPlace(dir, gamma);
  } else {
    // This is a safety feature preventing too large of an initial step (see Yu, Schraudolph, and Gunter)
    ArrayMath.multiplyInPlace(dir, epsilon);
  }
  for (int i = 0; i < mmm; i++) {
    double b = roList.get(i) * ArrayMath.innerProduct(yList.get(i), dir);
    plusAndConstMult(dir, sList.get(i), cPosDef * as[i] - b, dir);
    plusAndConstMult(ArrayMath.pairwiseMultiply(yList.get(i), sList.get(i)), factors, 1, factors);
  }
  ArrayMath.multiplyInPlace(dir, -1);
}
Example 6: mapDirByInverseHessian

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

void mapDirByInverseHessian() {
  int count = sList.size();
  if (count != 0) {
    // check that the ro values are all nonzero.
    // if they aren't, then don't use information about the hessian
    // to change the descent direction.
    for (int i = count - 1; i >= 0; i--) {
      if (roList.get(i) == 0.0) {
        return;
      }
    }
    for (int i = count - 1; i >= 0; i--) {
      alphas[i] = -ArrayMath.innerProduct(sList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, yList.get(i), alphas[i]);
    }
    double[] lastY = yList.get(count - 1);
    double yDotY = ArrayMath.innerProduct(lastY, lastY);
    double scalar = roList.get(count - 1) / yDotY;
    ArrayMath.multiplyInPlace(dir, scalar);
    for (int i = 0; i < count; i++) {
      double beta = ArrayMath.innerProduct(yList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, sList.get(i), -alphas[i] - beta);
    }
  }
}
Example 7: getIncrementalScoreInnerProduct

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

private double getIncrementalScoreInnerProduct(DenseFeatureValueCollection<String> fva) {
  return ArrayMath.innerProduct(fva.toDoubleArray(), weights);
}
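This is the usual linear-model score: the dot product of a dense feature-value vector with a weight vector. A hypothetical usage sketch (the values are made up):

double[] featureValues = {1.0, 0.0, 2.5};
double[] weights = {0.3, -1.2, 0.8};
double score = ArrayMath.innerProduct(featureValues, weights); // 0.3 + 0.0 + 2.0 = 2.3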
Example 8: shift

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

void shift() {
  double[] nextS = null, nextY = null;
  int listSize = sList.size();
  if (listSize < m) {
    try {
      nextS = new double[dim];
      nextY = new double[dim];
    } catch (OutOfMemoryError e) {
      m = listSize;
      nextS = null;
    }
  }
  if (nextS == null) {
    nextS = sList.poll();
    nextY = yList.poll();
    roList.poll();
  }
  ArrayMath.addMultInto(nextS, newX, x, -1);
  ArrayMath.addMultInto(nextY, newGrad, grad, -1);
  double ro = ArrayMath.innerProduct(nextS, nextY);
  assert (ro != 0.0);
  sList.offer(nextS);
  yList.offer(nextY);
  roList.offer(ro);
  double[] tmpX = newX;
  newX = x;
  x = tmpX;
  // TODO: added: nschneid
  /*if (OWLQN.isConstrained()) {
    newX = OWLQN.projectWeights(newX);
    x = OWLQN.projectWeights(x);
  }*/
  double[] tmpGrad = newGrad;
  newGrad = grad;
  grad = tmpGrad;
  ++iter;
}
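In L-BFGS terms, shift() records the newest curvature pair: s = newX - x (the parameter change), y = newGrad - grad (the gradient change), and ro = s.y. A sketch of what the two addMultInto calls and the inner product compute (the variable names here are mine):

double[] s = new double[dim];
double[] y = new double[dim];
for (int i = 0; i < dim; i++) {
  s[i] = newX[i] - x[i];       // ArrayMath.addMultInto(s, newX, x, -1)
  y[i] = newGrad[i] - grad[i]; // ArrayMath.addMultInto(y, newGrad, grad, -1)
}
double ro = ArrayMath.innerProduct(s, y);

The assert guards against ro == 0; for a strictly convex objective the curvature condition s.y > 0 holds, which is what keeps the implicit inverse-Hessian approximation positive definite.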
Example 9: lineSearchBacktrack

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

private double[] lineSearchBacktrack(Function func, double[] dir, double[] x,
    double[] newX, double[] grad, double lastValue)
    throws MaxEvaluationsExceeded {
  double normGradInDir = ArrayMath.innerProduct(dir, grad);
  say("(" + nf.format(normGradInDir) + ")");
  if (normGradInDir > 0) {
    say("{WARNING--- direction of positive gradient chosen!}");
  }
  // c1 can be anything between 0 and 1, exclusive (usu. 1/10 - 1/2)
  double step, c1;
  // for first few steps, we have less confidence in our initial step-size a
  // so scale back quicker
  if (its <= 2) {
    step = 0.1;
    c1 = 0.1;
  } else {
    step = 1.0;
    c1 = 0.1;
  }
  // should be small e.g. 10^-5 ... 10^-1
  double c = 0.01;
  // double v = func.valueAt(x);
  // c = c * mult(grad, dir);
  c = c * normGradInDir;
  double[] newPoint = new double[3];
  while ((newPoint[f] = func.valueAt(plusAndConstMult(x, dir, step, newX))) > lastValue
      + c * step) {
    fevals += 1;
    if (newPoint[f] < lastValue) {
      // an improvement, but not good enough... suspicious!
      say("!");
    } else {
      say(".");
    }
    step = c1 * step;
  }
  newPoint[a] = step;
  fevals += 1;
  if (fevals > maxFevals) {
    throw new MaxEvaluationsExceeded(" Exceeded during linesearch() Function ");
  }
  return newPoint;
}
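The while loop implements a backtracking line search against the Armijo sufficient-decrease condition: accept the step when f(x + step*dir) <= f(x) + c * step * (grad.dir), otherwise shrink the step by the factor c1. Restated as a predicate (a sketch; the name is mine, and note that in the code above c has already been multiplied by normGradInDir before the loop):

boolean armijoAccepts(double newValue, double lastValue, double c, double step, double gradDotDir) {
  return newValue <= lastValue + c * step * gradDotDir;
}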
Example 10: testConditionNumber

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

public double testConditionNumber(int samples) {
  double maxSeen = 0.0;
  // Initialize the minimum to +infinity so the first sample can set it;
  // starting at 0.0 would mean Math.abs(thisVHV) < minSeen never holds.
  double minSeen = Double.POSITIVE_INFINITY;
  double[] thisV = new double[thisFunc.domainDimension()];
  double[] thisX = new double[thisV.length];
  gradFD = new double[thisV.length];
  HvFD = new double[thisV.length];
  double thisVHV;
  boolean isNeg = false;
  boolean isPos = false;
  boolean isSemi = false;
  thisFunc.method = StochasticCalculateMethods.ExternalFiniteDifference;
  for (int j = 0; j < samples; j++) {
    for (int i = 0; i < thisV.length; i++) {
      thisV[i] = generator.nextDouble();
    }
    for (int i = 0; i < thisX.length; i++) {
      thisX[i] = generator.nextDouble();
    }
    System.err.println("Evaluating Hessian Product");
    System.arraycopy(thisFunc.derivativeAt(thisX, thisV, testBatchSize), 0, gradFD, 0, gradFD.length);
    thisFunc.recalculatePrevBatch = true;
    System.arraycopy(thisFunc.HdotVAt(thisX, thisV, gradFD, testBatchSize), 0, HvFD, 0, HvFD.length);
    // thisVHV is the quadratic form v^T H v for the sampled probe vector v
    thisVHV = ArrayMath.innerProduct(thisV, HvFD);
    if (Math.abs(thisVHV) > maxSeen) {
      maxSeen = Math.abs(thisVHV);
    }
    if (Math.abs(thisVHV) < minSeen) {
      minSeen = Math.abs(thisVHV);
    }
    if (thisVHV < 0) {
      isNeg = true;
    }
    if (thisVHV > 0) {
      isPos = true;
    }
    if (thisVHV == 0) {
      isSemi = true;
    }
    System.err.println("It: " + j + " C: " + maxSeen / minSeen + " N: " + isNeg + " P: " + isPos + " S: " + isSemi);
  }
  System.out.println("Condition Number of: " + maxSeen / minSeen);
  System.out.println("Is negative: " + isNeg);
  System.out.println("Is positive: " + isPos);
  System.out.println("Is semi: " + isSemi);
  return maxSeen / minSeen;
}
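The probe quantity is the quadratic form v^T H v, so its sign over random vectors classifies the Hessian's curvature: always positive suggests positive definite, always negative suggests negative definite, and mixed signs mean indefinite. The ratio maxSeen / minSeen of the absolute values is only a rough sampled proxy for the true condition number (the ratio of extreme eigenvalue magnitudes). In the sketch below, v and Hv stand in for thisV and HvFD above:

double vHv = ArrayMath.innerProduct(v, Hv); // quadratic form v^T H v
boolean positiveCurvature = vHv > 0;        // one probe of definiteness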
Example 11: getVariance

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

public double[] getVariance(double[] x, int batchSize) {
  double[] ret = new double[4];
  double[] fullHx = new double[thisFunc.domainDimension()];
  double[] thisHx = new double[x.length];
  double[] thisGrad = new double[x.length];
  List<double[]> HxList = new ArrayList<double[]>();
  /*
  PrintWriter file = null;
  NumberFormat nf = new DecimalFormat("0.000E0");
  try {
    file = new PrintWriter(new FileOutputStream("var.out"), true);
  } catch (IOException e) {
    System.err.println("Caught IOException outputing List to file: " + e.getMessage());
    System.exit(1);
  }
  */
  // get the full-data Hessian-vector product as the reference
  thisFunc.sampleMethod = AbstractStochasticCachingDiffFunction.SamplingMethod.Ordered;
  System.arraycopy(thisFunc.derivativeAt(x, x, thisFunc.dataDimension()), 0, thisGrad, 0, thisGrad.length);
  System.arraycopy(thisFunc.HdotVAt(x, x, thisGrad, thisFunc.dataDimension()), 0, fullHx, 0, fullHx.length);
  double fullNorm = ArrayMath.norm(fullHx);
  double hessScale = ((double) thisFunc.dataDimension()) / ((double) batchSize);
  thisFunc.sampleMethod = AbstractStochasticCachingDiffFunction.SamplingMethod.RandomWithReplacement;
  int n = 100;
  double simDelta;
  double ratDelta;
  double simMean = 0;
  double ratMean = 0;
  double simS = 0;
  double ratS = 0;
  int k = 0;
  System.err.println(fullHx[4] + " " + x[4]);
  for (int i = 0; i < n; i++) {
    System.arraycopy(thisFunc.derivativeAt(x, x, batchSize), 0, thisGrad, 0, thisGrad.length);
    System.arraycopy(thisFunc.HdotVAt(x, x, thisGrad, batchSize), 0, thisHx, 0, thisHx.length);
    ArrayMath.multiplyInPlace(thisHx, hessScale);
    double thisNorm = ArrayMath.norm(thisHx);
    // cosine similarity and norm ratio between the batch and full-data products
    double sim = ArrayMath.innerProduct(thisHx, fullHx) / (thisNorm * fullNorm);
    double rat = thisNorm / fullNorm;
    // online (Welford-style) updates of the running means and squared deviations
    k += 1;
    simDelta = sim - simMean;
    simMean += simDelta / k;
    simS += simDelta * (sim - simMean);
    ratDelta = rat - ratMean;
    ratMean += ratDelta / k;
    ratS += ratDelta * (rat - ratMean);
    //file.println(nf.format(sim) + " , " + nf.format(rat));
  }
  double simVar = simS / (k - 1);
  double ratVar = ratS / (k - 1);
  //file.close();
  ret[0] = simMean;
  ret[1] = simVar;
  ret[2] = ratMean;
  ret[3] = ratVar;
  return ret;
}
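The running mean and variance updates above (simMean/simS and ratMean/ratS) are Welford's online algorithm, which avoids a second pass over the samples and is numerically more stable than the naive sum-of-squares formula. A self-contained sketch (the method name is mine):

static double[] onlineMeanVariance(double[] samples) {
  double mean = 0.0;
  double s = 0.0; // running sum of squared deviations
  int k = 0;
  for (double x : samples) {
    k++;
    double delta = x - mean;
    mean += delta / k;
    s += delta * (x - mean); // note: uses the updated mean, exactly as in the loop above
  }
  double variance = (k > 1) ? s / (k - 1) : 0.0; // sample variance, matching simS / (k - 1)
  return new double[] { mean, variance };
}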
Example 12: shift

import edu.stanford.nlp.math.ArrayMath; // import the package/class that the method depends on

void shift() {
  double[] nextS = null, nextY = null;
  int listSize = sList.size();
  if (listSize < m) {
    try {
      nextS = new double[dim];
      nextY = new double[dim];
    } catch (OutOfMemoryError e) {
      m = listSize;
      nextS = null;
    }
  }
  if (nextS == null) {
    nextS = sList.poll();
    nextY = yList.poll();
    roList.poll();
  }
  ArrayMath.addMultInto(nextS, newX, x, -1);
  ArrayMath.addMultInto(nextY, newGrad, grad, -1);
  double ro = ArrayMath.innerProduct(nextS, nextY);
  assert (ro != 0.0);
  sList.offer(nextS);
  yList.offer(nextY);
  roList.offer(ro);
  double[] tmpX = newX;
  newX = x;
  x = tmpX;
  double[] tmpGrad = newGrad;
  newGrad = grad;
  grad = tmpGrad;
  ++iter;
}