This article collects typical usage examples of the Java method cc.mallet.types.MatrixOps.setAll. If you have been wondering what exactly MatrixOps.setAll does, how to use it, or where to find it used in real code, the curated method examples here may help. You can also read further about its containing class, cc.mallet.types.MatrixOps.
The following presents 6 code examples of MatrixOps.setAll, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
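Before the examples, a minimal self-contained sketch of the call itself may help (the class name SetAllDemo is ours for illustration): MatrixOps.setAll(double[] m, double v) overwrites every entry of the array with the given value, which is why the examples below use it to zero a scratch buffer before accumulating gradients into it.

import cc.mallet.types.MatrixOps;

public class SetAllDemo {
    public static void main(String[] args) {
        // Allocate a scratch buffer and reset every entry to zero,
        // the same pattern the examples below use before accumulating gradients.
        double[] buffer = new double[8];
        MatrixOps.setAll(buffer, 0.0);
        System.out.println(MatrixOps.sum(buffer)); // prints 0.0
    }
}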
Example 1: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class the method depends on
public void getValueGradient (double [] buffer)
{
    // PriorGradient is -parameter/gaussianPriorVariance
    // Gradient is (constraint - expectation + PriorGradient)
    // == -(expectation - constraint - PriorGradient).
    // Gradient points "up-hill", i.e. in the direction of higher value
    if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
        getValue (); // This will fill in this.expectation, updating it if necessary
        MatrixOps.setAll(cachedGradient, 0);
        double[] b2 = new double[buffer.length];
        for (int i = 0; i < opts.length; i++) {
            MatrixOps.setAll(b2, 0);
            opts[i].getValueGradient(b2);
            MatrixOps.plusEquals(cachedGradient, b2);
        }
        cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
    }
    System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
Example 2: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class the method depends on
public void getValueGradient (double [] buffer) {
    if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
        getValue ();
        MatrixOps.setAll(cachedGradient, 0);
        double[] b2 = new double[buffer.length];
        for (int i = 0; i < optimizables.length; i++) {
            MatrixOps.setAll(b2, 0);
            optimizables[i].getValueGradient(b2);
            MatrixOps.plusEquals(cachedGradient, b2);
        }
        cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
    }
    System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
Developer: kostagiolasn, Project: NucleosomePatternClassifier, Line count: 15, Source file: CRFOptimizableByGradientValues.java
Example 3: OrthantWiseLimitedMemoryBFGS
import cc.mallet.types.MatrixOps; // import the package/class the method depends on
public OrthantWiseLimitedMemoryBFGS(Optimizable.ByGradientValue function,
        double l1wt) {
    this.optimizable = function;
    this.l1Weight = l1wt;
    String[] parts = optimizable.getClass().getName().split("\\.");
    this.optName = parts[parts.length - 1];
    // initialize optimizer state
    iterations = 0;
    s = new LinkedList<double[]>();
    y = new LinkedList<double[]>();
    rhos = new LinkedList<Double>();
    alphas = new double[m];
    MatrixOps.setAll(alphas, 0.0);
    yDotY = 0;
    int numParameters = optimizable.getNumParameters();
    // get initial parameters
    parameters = new double[numParameters];
    optimizable.getParameters(parameters);
    // get initial value
    value = evalL1();
    // get initial gradient
    grad = new double[numParameters];
    evalGradient();
    // initialize direction
    direction = new double[numParameters];
    steepestDescentDirection = new double[numParameters];
    // initialize backups
    oldParameters = new double[numParameters];
    oldGrad = new double[numParameters];
}
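As a usage note, this constructor is typically driven through MALLET's Optimizer interface. Below is a hedged sketch, assuming the caller already has an Optimizable.ByGradientValue objective; the wrapper class OwlQnDemo and the helper method name are ours for illustration, not part of the example above.

import cc.mallet.optimize.Optimizable;
import cc.mallet.optimize.OrthantWiseLimitedMemoryBFGS;

public class OwlQnDemo {
    // Hypothetical helper: builds the optimizer with an L1 weight and runs it.
    static boolean trainWithL1(Optimizable.ByGradientValue objective, double l1Weight) {
        OrthantWiseLimitedMemoryBFGS owlqn =
                new OrthantWiseLimitedMemoryBFGS(objective, l1Weight);
        return owlqn.optimize(); // returns true if the optimizer converged
    }
}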
Example 4: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class the method depends on
public void getValueGradient (double[] buffer) {
    double[] b2 = new double[buffer.length];
    for (Optimizable.ByGradientValue o : optimizables) {
        MatrixOps.setAll(b2, 0);
        o.getValueGradient(b2);
        MatrixOps.plusEquals(buffer, b2);
    }
}
Example 5: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class the method depends on
public void getValueGradient (double[] buffer) {
    double[] b2 = new double[buffer.length];
    for (ByGradientValue o : optimizables) {
        MatrixOps.setAll(b2, 0);
        o.getValueGradient(b2);
        MatrixOps.plusEquals(buffer, b2);
    }
}
Example 6: getValue
import cc.mallet.types.MatrixOps; // import the package/class the method depends on
public double getValue ()
{
    if (cachedValueStale) {
        numGetValueCalls++;
        cachedValue = 0;
        // We'll store the expectation values in "cachedGradient" for now
        cachedGradientStale = true;
        MatrixOps.setAll (cachedGradient, 0.0);
        // Incorporate likelihood of data
        double[] scores = new double[trainingList.getTargetAlphabet().size()];
        double value = 0.0;
        Iterator<Instance> iter = trainingList.iterator();
        int ii = 0;
        while (iter.hasNext()) {
            ii++;
            Instance instance = iter.next();
            double instanceWeight = trainingList.getInstanceWeight(instance);
            Labeling labeling = instance.getLabeling ();
            if (labeling == null)
                continue;
            //System.out.println("L Now "+inputAlphabet.size()+" regular features.");
            this.theClassifier.getClassificationScores (instance, scores);
            FeatureVector fv = (FeatureVector) instance.getData ();
            int li = labeling.getBestIndex();
            value = - (instanceWeight * Math.log (scores[li]));
            if (Double.isNaN(value)) {
                logger.fine ("MaxEntTrainer: Instance " + instance.getName() +
                             " has NaN value. log(scores)= " + Math.log(scores[li]) +
                             " scores = " + scores[li] +
                             " has instance weight = " + instanceWeight);
            }
            if (Double.isInfinite(value)) {
                logger.warning ("Instance " + instance.getSource() + " has infinite value; skipping value and gradient");
                cachedValue -= value;
                cachedValueStale = false;
                return -value;
                // continue;
            }
            cachedValue += value;
            for (int si = 0; si < scores.length; si++) {
                if (scores[si] == 0) continue;
                assert (!Double.isInfinite(scores[si]));
                MatrixOps.rowPlusEquals (cachedGradient, numFeatures,
                                         si, fv, -instanceWeight * scores[si]);
                cachedGradient[numFeatures*si + defaultFeatureIndex] += (-instanceWeight * scores[si]);
            }
        }
        //logger.info ("-Expectations:"); cachedGradient.print();
        // Incorporate prior on parameters
        double prior = 0;
        if (usingHyperbolicPrior) {
            for (int li = 0; li < numLabels; li++)
                for (int fi = 0; fi < numFeatures; fi++)
                    prior += (hyperbolicPriorSlope / hyperbolicPriorSharpness
                              * Math.log (Maths.cosh (hyperbolicPriorSharpness * parameters[li*numFeatures + fi])));
        }
        else if (usingGaussianPrior) {
            for (int li = 0; li < numLabels; li++)
                for (int fi = 0; fi < numFeatures; fi++) {
                    double param = parameters[li*numFeatures + fi];
                    prior += param * param / (2 * gaussianPriorVariance);
                }
        }
        double oValue = cachedValue;
        cachedValue += prior;
        cachedValue *= -1.0; // MAXIMIZE, NOT MINIMIZE
        cachedValueStale = false;
        progressLogger.info ("Value (labelProb=" + oValue + " prior=" + prior + ") loglikelihood = " + cachedValue);
    }
    return cachedValue;
}
Developer: kostagiolasn, Project: NucleosomePatternClassifier, Line count: 76, Source file: MaxEntOptimizableByLabelLikelihood.java
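To make the Gaussian prior term in Example 6 concrete, here is a minimal standalone sketch of that inner loop; the toy values and the class name GaussianPriorDemo are ours, while the formula param * param / (2 * gaussianPriorVariance) is taken directly from the example above.

public class GaussianPriorDemo {
    public static void main(String[] args) {
        double[] parameters = {0.5, -1.0, 2.0}; // toy weights, for illustration only
        double gaussianPriorVariance = 1.0;     // illustrative variance
        double prior = 0;
        for (double param : parameters) {
            // Same penalty as the usingGaussianPrior branch in Example 6
            prior += param * param / (2 * gaussianPriorVariance);
        }
        System.out.println(prior); // 0.125 + 0.5 + 2.0 = 2.625
    }
}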