This article collects typical usage examples of the Java method edu.stanford.nlp.math.ArrayMath.multiplyInPlace. If you are wondering what ArrayMath.multiplyInPlace does, how to call it, or where to find working examples, the curated code samples below should help; you can also read more about the enclosing class, edu.stanford.nlp.math.ArrayMath.
The following presents 12 code examples of ArrayMath.multiplyInPlace, sorted by popularity by default.
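Before the examples, a quick orientation: ArrayMath.multiplyInPlace(double[] a, double b) scales every element of the array by the scalar, mutating the array rather than allocating a new one. Here is a minimal, self-contained sketch (the class name is illustrative) of the most common idiom in the examples below, normalizing a score vector by its sum:

import java.util.Arrays;
import edu.stanford.nlp.math.ArrayMath;

public class MultiplyInPlaceDemo {
  public static void main(String[] args) {
    double[] scores = {1.0, 2.0, 4.0};
    double Z = ArrayMath.sum(scores);            // 7.0
    ArrayMath.multiplyInPlace(scores, 1.0 / Z);  // scale each element by 1/Z
    System.out.println(Arrays.toString(scores)); // [0.1428..., 0.2857..., 0.5714...]
  }
}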
Example 1: greedyDecode
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * THIS CLOBBERS THE LABELS, stores its decoding into them.
 * Does progressive rolling edge feature extraction.
 **/
public void greedyDecode(ModelSentence sentence, boolean storeConfidences) {
  int T = sentence.T;
  sentence.labels = new int[T];
  sentence.edgeFeatures[0] = startMarker();

  if (storeConfidences) sentence.confidences = new double[T];

  double[] labelScores = new double[numLabels];
  for (int t = 0; t < T; t++) {
    computeLabelScores(t, sentence, labelScores);
    sentence.labels[t] = ArrayMath.argmax(labelScores);
    if (t < T - 1)
      sentence.edgeFeatures[t + 1] = sentence.labels[t];
    if (storeConfidences) {
      ArrayMath.expInPlace(labelScores);
      double Z = ArrayMath.sum(labelScores);
      ArrayMath.multiplyInPlace(labelScores, 1.0 / Z);
      sentence.confidences[t] = labelScores[sentence.labels[t]];
    }
  }
}
Example 2: mapDirByInverseHessian
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
void mapDirByInverseHessian() {
  int count = sList.size();
  if (count != 0) {
    for (int i = count - 1; i >= 0; i--) {
      // mheilman: The program will try to divide by zero here unless there is a check
      // that the parameters change at each iteration. See comments in the minimize() method.
      // A roList value is the inner product of the change in the gradient
      // and the change in parameters between the current and last iterations.
      // See the discussion of L-BFGS in Nocedal and Wright's Numerical Optimization book
      // (though I think that defines rho as the multiplicative inverse of what is here).
      alphas[i] = -ArrayMath.innerProduct(sList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, yList.get(i), alphas[i]);
    }

    double[] lastY = yList.get(count - 1);
    double yDotY = ArrayMath.innerProduct(lastY, lastY);
    double scalar = roList.get(count - 1) / yDotY;
    ArrayMath.multiplyInPlace(dir, scalar);

    for (int i = 0; i < count; i++) {
      double beta = ArrayMath.innerProduct(yList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, sList.get(i), -alphas[i] - beta);
    }
  }
}
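As the comment above notes, each roList entry is the inner product of the gradient change and the parameter change between iterations, whereas Nocedal and Wright define rho as its reciprocal. A hypothetical sketch of how such an entry would be computed (gradNew, gradOld, xNew, xOld are illustrative names for the current/previous gradients and parameters):

double[] y = ArrayMath.pairwiseSubtract(gradNew, gradOld); // change in gradient
double[] s = ArrayMath.pairwiseSubtract(xNew, xOld);       // change in parameters
double ro = ArrayMath.innerProduct(y, s);                  // Nocedal & Wright's rho = 1 / ro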
Example 3: applyInitialHessian
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public double[] applyInitialHessian(double[] x) {
  switch (scaleOpt) {
    case SCALAR:
      say("I");
      ArrayMath.multiplyInPlace(x, gamma);
      break;
    case DIAGONAL:
      say("D");
      if (d != null) {
        // Check sizes
        if (x.length != d.length) {
          throw new IllegalArgumentException("Vector of incorrect size passed to applyInitialHessian in QNInfo class");
        }
        // Scale element-wise
        for (int i = 0; i < x.length; i++) {
          x[i] = x[i] / d[i];
        }
      }
      break;
  }
  return x;
}
Example 4: computeDir
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
private void computeDir(double[] dir, double[] fg, double[] x, QNInfo qn, Function func)
    throws SurpriseConvergence {
  System.arraycopy(fg, 0, dir, 0, fg.length);

  int mmm = qn.size();
  double[] as = new double[mmm];

  for (int i = mmm - 1; i >= 0; i--) {
    as[i] = qn.getRho(i) * ArrayMath.innerProduct(qn.getS(i), dir);
    plusAndConstMult(dir, qn.getY(i), -as[i], dir);
  }

  // multiply by hessian approximation
  qn.applyInitialHessian(dir);

  for (int i = 0; i < mmm; i++) {
    double b = qn.getRho(i) * ArrayMath.innerProduct(qn.getY(i), dir);
    plusAndConstMult(dir, qn.getS(i), as[i] - b, dir);
  }

  ArrayMath.multiplyInPlace(dir, -1);

  if (useOWLQN) { // step (2) in Galen & Gao 2007
    constrainSearchDir(dir, fg, x, func);
  }
}
Example 5: derivativeAt
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
@Override
public double[] derivativeAt(double[] flatCoefs) {
  double[] g = new double[model.flatIDsize()];
  model.setCoefsFromFlat(flatCoefs);
  for (ModelSentence s : mSentences) {
    model.computeGradient(s, g);
  }
  ArrayMath.multiplyInPlace(g, -1);
  addL2regularizerGradient(g, flatCoefs);
  return g;
}
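Here multiplyInPlace(g, -1) flips the sign of the accumulated gradient, turning a log-likelihood gradient (to be maximized) into a negative-log-likelihood gradient that a minimizer can consume. A tiny illustration with made-up numbers:

double[] g = {0.5, -0.25};        // gradient of the log-likelihood
ArrayMath.multiplyInPlace(g, -1); // g is now {-0.5, 0.25}: gradient of the loss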
Example 6: samplePositionHelper
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
/**
 * Samples a single position in the sequence.
 * Does not modify the sequence passed in.
 * Returns the new label for the sampled position together with its probability.
 *
 * @param model the sequence model to sample from
 * @param sequence the sequence to start with
 * @param pos the position to sample
 * @param temperature the temperature to control annealing
 */
private Pair<Integer, Double> samplePositionHelper(SequenceModel model, int[] sequence, int pos, double temperature) {
  double[] distribution = model.scoresOf(sequence, pos);
  if (temperature != 1.0) {
    if (temperature == 0.0) {
      // set the max to 1.0
      int argmax = ArrayMath.argmax(distribution);
      Arrays.fill(distribution, Double.NEGATIVE_INFINITY);
      distribution[argmax] = 0.0;
    } else {
      // take all to a power:
      // use the temperature to increase/decrease the entropy of the sampling distribution
      ArrayMath.multiplyInPlace(distribution, 1.0 / temperature);
    }
  }
  ArrayMath.logNormalize(distribution);
  ArrayMath.expInPlace(distribution);

  if (BisequenceEmpiricalNERPrior.DEBUG) {
    if (BisequenceEmpiricalNERPrior.debugIndices.indexOf(pos) != -1) {
      System.err.println("final model:");
      for (int j = 0; j < distribution.length; j++)
        System.err.println("\t" + distribution[j]);
      System.err.println();
    }
  }

  int newTag = ArrayMath.sampleFromDistribution(distribution, random);
  double newProb = distribution[newTag];
  return new Pair<Integer, Double>(newTag, newProb);
}
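Since model.scoresOf evidently returns log-scores here (they are log-normalized and exponentiated afterwards), dividing them by the temperature sharpens the distribution for temperature < 1 and flattens it for temperature > 1. A standalone illustration with made-up log-scores:

double[] logScores = {0.0, -1.0, -2.0};          // made-up log-scores
ArrayMath.multiplyInPlace(logScores, 1.0 / 0.5); // temperature 0.5 sharpens
ArrayMath.logNormalize(logScores);               // subtract the log-sum-exp
ArrayMath.expInPlace(logScores);                 // now a proper distribution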
Example 7: smooth
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
protected static double[] smooth(List<double[]> toSmooth) {
  double[] smoothed = new double[toSmooth.get(0).length];
  for (double[] thisArray : toSmooth) {
    ArrayMath.pairwiseAddInPlace(smoothed, thisArray);
  }
  ArrayMath.multiplyInPlace(smoothed, 1.0 / toSmooth.size());
  return smoothed;
}
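smooth simply averages the input arrays element-wise. A short usage example (assuming java.util.Arrays and java.util.List are imported):

List<double[]> runs = Arrays.asList(new double[]{1.0, 3.0}, new double[]{3.0, 5.0});
double[] avg = smooth(runs); // {2.0, 4.0}: the element-wise mean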
Example 8: computeDir
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
private void computeDir(double[] dir, double[] fg) throws SQNMinimizer.SurpriseConvergence {
  System.arraycopy(fg, 0, dir, 0, fg.length);

  int mmm = sList.size();
  double[] as = new double[mmm];
  double[] factors = new double[dir.length];

  for (int i = mmm - 1; i >= 0; i--) {
    as[i] = roList.get(i) * ArrayMath.innerProduct(sList.get(i), dir);
    plusAndConstMult(dir, yList.get(i), -as[i], dir);
  }

  // multiply by hessian approximation
  if (mmm != 0) {
    double[] y = yList.get(mmm - 1);
    double yDotY = ArrayMath.innerProduct(y, y);
    if (yDotY == 0) {
      throw new SQNMinimizer.SurpriseConvergence("Y is 0!!");
    }
    double gamma = ArrayMath.innerProduct(sList.get(mmm - 1), y) / yDotY;
    ArrayMath.multiplyInPlace(dir, gamma);
  } else {
    // This is a safety feature preventing too large of an initial step (see Yu, Schraudolph, Gunter).
    ArrayMath.multiplyInPlace(dir, epsilon);
  }

  for (int i = 0; i < mmm; i++) {
    double b = roList.get(i) * ArrayMath.innerProduct(yList.get(i), dir);
    plusAndConstMult(dir, sList.get(i), cPosDef * as[i] - b, dir);
    plusAndConstMult(ArrayMath.pairwiseMultiply(yList.get(i), sList.get(i)), factors, 1, factors);
  }

  ArrayMath.multiplyInPlace(dir, -1);
}
Example 9: scaleFeaturesGaussian
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public void scaleFeaturesGaussian() {
  means = new double[this.numFeatures()];
  Arrays.fill(means, 0);
  for (int i = 0; i < this.size(); i++) {
    for (int j = 0; j < data[i].length; j++)
      means[data[i][j]] += values[i][j];
  }
  ArrayMath.multiplyInPlace(means, 1.0 / this.size());

  stdevs = new double[this.numFeatures()];
  Arrays.fill(stdevs, 0);
  double[] deltaX = new double[this.numFeatures()];
  for (int i = 0; i < this.size(); i++) {
    for (int f = 0; f < this.numFeatures(); f++)
      deltaX[f] = -means[f];
    for (int j = 0; j < data[i].length; j++)
      deltaX[data[i][j]] += values[i][j];
    for (int f = 0; f < this.numFeatures(); f++) {
      stdevs[f] += deltaX[f] * deltaX[f];
    }
  }
  for (int f = 0; f < this.numFeatures(); f++) {
    stdevs[f] /= (this.size() - 1);
    stdevs[f] = Math.sqrt(stdevs[f]);
  }

  for (int i = 0; i < this.size(); i++) {
    for (int j = 0; j < data[i].length; j++) {
      int fID = data[i][j];
      if (stdevs[fID] != 0)
        values[i][j] = (values[i][j] - means[fID]) / stdevs[fID];
    }
  }
}
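The final loop z-scores each stored feature value against its mean and sample standard deviation. A quick numeric check with illustrative numbers:

// a feature with mean 5.0 and stdev 2.0 maps a raw value of 9.0 to
double scaled = (9.0 - 5.0) / 2.0; // 2.0 standard deviations above the mean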
Example 10: mapDirByInverseHessian
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
void mapDirByInverseHessian() {
  int count = sList.size();
  if (count != 0) {
    // Check that the ro values are all nonzero.
    // If they aren't, then don't use information about the hessian
    // to change the descent direction.
    for (int i = count - 1; i >= 0; i--) {
      if (roList.get(i) == 0.0) {
        return;
      }
    }

    for (int i = count - 1; i >= 0; i--) {
      alphas[i] = -ArrayMath.innerProduct(sList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, yList.get(i), alphas[i]);
    }

    double[] lastY = yList.get(count - 1);
    double yDotY = ArrayMath.innerProduct(lastY, lastY);
    double scalar = roList.get(count - 1) / yDotY;
    ArrayMath.multiplyInPlace(dir, scalar);

    for (int i = 0; i < count; i++) {
      double beta = ArrayMath.innerProduct(yList.get(i), dir) / roList.get(i);
      ArrayMath.addMultInPlace(dir, sList.get(i), -alphas[i] - beta);
    }
  }
}
Example 11: getVariance
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
public double[] getVariance(double[] x, int batchSize) {
  double[] ret = new double[4];
  double[] fullHx = new double[thisFunc.domainDimension()];
  double[] thisHx = new double[x.length];
  double[] thisGrad = new double[x.length];
  List<double[]> HxList = new ArrayList<double[]>();

  /*
  PrintWriter file = null;
  NumberFormat nf = new DecimalFormat("0.000E0");
  try {
    file = new PrintWriter(new FileOutputStream("var.out"), true);
  } catch (IOException e) {
    System.err.println("Caught IOException outputting List to file: " + e.getMessage());
    System.exit(1);
  }
  */

  // Get the full-data gradient and Hessian-vector product.
  thisFunc.sampleMethod = AbstractStochasticCachingDiffFunction.SamplingMethod.Ordered;
  System.arraycopy(thisFunc.derivativeAt(x, x, thisFunc.dataDimension()), 0, thisGrad, 0, thisGrad.length);
  System.arraycopy(thisFunc.HdotVAt(x, x, thisGrad, thisFunc.dataDimension()), 0, fullHx, 0, fullHx.length);
  double fullNorm = ArrayMath.norm(fullHx);
  double hessScale = ((double) thisFunc.dataDimension()) / ((double) batchSize);
  thisFunc.sampleMethod = AbstractStochasticCachingDiffFunction.SamplingMethod.RandomWithReplacement;

  int n = 100;
  double simDelta;
  double ratDelta;
  double simMean = 0;
  double ratMean = 0;
  double simS = 0;
  double ratS = 0;
  int k = 0;

  System.err.println(fullHx[4] + " " + x[4]);

  for (int i = 0; i < n; i++) {
    System.arraycopy(thisFunc.derivativeAt(x, x, batchSize), 0, thisGrad, 0, thisGrad.length);
    System.arraycopy(thisFunc.HdotVAt(x, x, thisGrad, batchSize), 0, thisHx, 0, thisHx.length);
    ArrayMath.multiplyInPlace(thisHx, hessScale);

    double thisNorm = ArrayMath.norm(thisHx);
    double sim = ArrayMath.innerProduct(thisHx, fullHx) / (thisNorm * fullNorm);
    double rat = thisNorm / fullNorm;

    k += 1;
    simDelta = sim - simMean;
    simMean += simDelta / k;
    simS += simDelta * (sim - simMean);
    ratDelta = rat - ratMean;
    ratMean += ratDelta / k;
    ratS += ratDelta * (rat - ratMean);
    //file.println(nf.format(sim) + " , " + nf.format(rat));
  }

  double simVar = simS / (k - 1);
  double ratVar = ratS / (k - 1);
  //file.close();

  ret[0] = simMean;
  ret[1] = simVar;
  ret[2] = ratMean;
  ret[3] = ratVar;
  return ret;
}
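The running simMean/simS and ratMean/ratS updates inside the loop appear to follow Welford's online algorithm for the mean and sample variance. A minimal standalone version with illustrative data:

double mean = 0.0, s = 0.0;
int k = 0;
for (double v : new double[]{2.0, 4.0, 6.0}) {
  k++;
  double delta = v - mean;
  mean += delta / k;       // running mean
  s += delta * (v - mean); // running sum of squared deviations
}
double variance = s / (k - 1); // sample variance: here 4.0, with mean 4.0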
Example 12: trainWeightsUsingDoubleCRF
import edu.stanford.nlp.math.ArrayMath; // import the package/class this method depends on
protected double[] trainWeightsUsingDoubleCRF(int[][][][] data, int[][] labels, Evaluator[] evaluators, int pruneFeatureItr, double[][][][] featureVals) {
  CRFLogConditionalObjectiveFunction func = new CRFLogConditionalObjectiveFunction(data, labels,
      windowSize, classIndex, labelIndices, map, flags.priorType, flags.backgroundSymbol, flags.sigma, featureVals);
  cliquePotentialFunctionHelper = func;

  Minimizer minimizer = getMinimizer(pruneFeatureItr, evaluators);

  double[] initialWeights;
  if (flags.initialWeights == null) {
    initialWeights = func.initial();
  } else {
    try {
      System.err.println("Reading initial weights from file " + flags.initialWeights);
      DataInputStream dis = new DataInputStream(new BufferedInputStream(new GZIPInputStream(new FileInputStream(
          flags.initialWeights))));
      initialWeights = ConvertByteArray.readDoubleArr(dis);
    } catch (IOException e) {
      throw new RuntimeException("Could not read from double initial weight file " + flags.initialWeights);
    }
  }
  System.err.println("numWeights: " + initialWeights.length);

  if (flags.testObjFunction) {
    StochasticDiffFunctionTester tester = new StochasticDiffFunctionTester(func);
    if (tester.testSumOfBatches(initialWeights, 1e-4)) {
      System.err.println("Testing complete... exiting");
      System.exit(1);
    } else {
      System.err.println("Testing failed....exiting");
      System.exit(1);
    }
  }

  // check gradient
  if (flags.checkGradient) {
    if (func.gradientCheck()) {
      System.err.println("gradient check passed");
    } else {
      throw new RuntimeException("gradient check failed");
    }
  }

  double[] ws = minimizer.minimize(func, flags.tolerance, initialWeights);
  if (flags.inputDropOut != 0.0) {
    // scale the weights since they won't be dropped at test time
    ArrayMath.multiplyInPlace(ws, 1.0 / (1.0 - flags.inputDropOut));
    System.err.printf("Scaled weights by %f", 1.0 / (1.0 - flags.inputDropOut));
  }
  return ws;
}
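The final scaling is the usual inverse-dropout correction: if inputs were dropped with probability p during training, multiplying the learned weights by 1/(1-p) keeps the expected activation the same at test time, when nothing is dropped. A numeric sanity check with an illustrative rate:

double inputDropOut = 0.1;                 // illustrative dropout rate
double scale = 1.0 / (1.0 - inputDropOut); // ~1.1111
// each weight w becomes w * scale, compensating for the ~10% of inputs
// that were zeroed out during training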