This article collects typical usage examples of the Java method cc.mallet.types.MatrixOps.plusEquals. If you have been wondering what exactly MatrixOps.plusEquals does, how to call it, or where to find examples of it in use, the curated examples below may help. You can also explore further usage of the enclosing class, cc.mallet.types.MatrixOps.
The following presents 13 code examples of MatrixOps.plusEquals, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better Java code samples.
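For orientation, here is a minimal sketch of the three plusEquals overloads the examples below rely on: an element-wise vector sum, a scaled vector sum, and a scalar increment. The calls mirror those in the examples; the expected results in the comments follow from that element-wise reading and are not verified against the library source.

import cc.mallet.types.MatrixOps;
import java.util.Arrays;

public class PlusEqualsDemo {
    public static void main(String[] args) {
        double[] a = {1.0, 2.0, 3.0};
        double[] b = {10.0, 20.0, 30.0};
        MatrixOps.plusEquals(a, b);       // element-wise: a[i] += b[i]       -> [11.0, 22.0, 33.0]
        MatrixOps.plusEquals(a, b, 0.5);  // scaled:       a[i] += 0.5 * b[i] -> [16.0, 32.0, 48.0]
        MatrixOps.plusEquals(a, 1e-8);    // scalar:       a[i] += 1e-8 (smoothing, as in Example 4)
        System.out.println(Arrays.toString(a));
    }
}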
Example 1: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double [] buffer)
{
// PriorGradient is -parameter/gaussianPriorVariance
// Gradient is (constraint - expectation + PriorGradient)
// == -(expectation - constraint - PriorGradient).
// Gradient points "up-hill", i.e. in the direction of higher value
if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
getValue (); // This will fill in this.expectation, updating it if necessary
MatrixOps.setAll(cachedGradient, 0);
double[] b2 = new double[buffer.length];
for (int i = 0; i < opts.length; i++) {
MatrixOps.setAll(b2, 0);
opts[i].getValueGradient(b2);
MatrixOps.plusEquals(cachedGradient, b2);
}
cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
}
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
Example 2: computeHessianProduct
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
private void computeHessianProduct(Optimizable.ByBatchGradient maxable,
double[] parameters, int batchIndex, int[] batchAssignments,
double[] currentGradient, double[] vector, double[] result) {
int numParameters = maxable.getNumParameters();
double eps = 1.0e-6;
double[] epsGradient = new double[numParameters];
double[] oldParameters = new double[numParameters];
// adjust parameters by (eps * vector) and recompute gradient
System.arraycopy(parameters,0,oldParameters,0,numParameters);
MatrixOps.plusEquals(parameters, vector, eps);
maxable.setParameters(parameters);
maxable.getBatchValueGradient(epsGradient, batchIndex, batchAssignments);
// restore old parameters
maxable.setParameters(oldParameters);
// calculate Hessian-vector product (finite-difference approximation)
for (int index = 0; index < result.length; index++) {
result[index] = (-epsGradient[index] - currentGradient[index]) / eps;
}
}
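The final loop is a finite-difference approximation of a Hessian-vector product, Hv ~= (g(x + eps*v) - g(x)) / eps; the extra minus signs suggest the caller stores gradients negated. Below is a minimal standalone illustration of the same trick (all identifiers invented): for a quadratic f(x) = 0.5 * x'Ax the gradient is Ax and the Hessian is A itself, so the finite difference recovers Av exactly, up to rounding.

import java.util.Arrays;

public class HessianVectorDemo {
    // f(x) = 0.5 * x'Ax, so grad f(x) = A x and the Hessian is the constant matrix A
    static final double[][] A = {{2.0, 1.0}, {1.0, 3.0}};

    static double[] grad(double[] x) {
        double[] g = new double[x.length];
        for (int i = 0; i < x.length; i++)
            for (int j = 0; j < x.length; j++)
                g[i] += A[i][j] * x[j];
        return g;
    }

    public static void main(String[] args) {
        double eps = 1.0e-6;
        double[] x = {1.0, -2.0};
        double[] v = {0.5, 1.0};
        double[] g0 = grad(x);
        double[] xEps = {x[0] + eps * v[0], x[1] + eps * v[1]};
        double[] g1 = grad(xEps);
        double[] hv = new double[x.length];
        for (int i = 0; i < x.length; i++)
            hv[i] = (g1[i] - g0[i]) / eps;       // ~= (A v)[i]
        System.out.println(Arrays.toString(hv)); // approximately [2.0, 3.5]
    }
}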
Example 3: mapDirByInverseHessian
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Adjusts the direction based on the approximate inverse Hessian.
*
* @param yDotY
* y^T * y in BFGS calculation.
*/
private void mapDirByInverseHessian(double yDotY) {
if (s.size() == 0)
return;
int count = s.size();
for (int i = count - 1; i >= 0; i--) {
alphas[i] = -MatrixOps.dotProduct(s.get(i), direction)
/ rhos.get(i);
MatrixOps.plusEquals(direction, y.get(i), alphas[i]);
}
double scalar = rhos.get(count - 1) / yDotY;
logger.fine("Direction multiplier = " + scalar);
MatrixOps.timesEquals(direction, scalar);
for (int i = 0; i < count; i++) {
double beta = MatrixOps.dotProduct(y.get(i), direction)
/ rhos.get(i);
MatrixOps.plusEquals(direction, s.get(i), -alphas[i] - beta);
}
}
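This is the standard L-BFGS two-loop recursion applied in place to direction. Note the convention the code implies: rhos apparently stores the products s_i^T y_i directly rather than their reciprocals (the code divides by it), and the final multiplier rhos.get(count - 1) / yDotY equals s^T y / y^T y, the usual initial scaling of the inverse-Hessian approximation.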
Example 4: setTargetsUsingData
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Set target distributions using estimates from data.
*
* @param list InstanceList used to estimate targets.
* @param features List of features for constraints.
* @param useValues Whether to weight counts by real feature values rather than binary presence.
* @param normalize Whether to normalize the counts into a probability distribution.
* @return Constraints (map of feature index to target), with targets
* set using estimates from supplied data.
*/
public static HashMap<Integer,double[]> setTargetsUsingData(InstanceList list, ArrayList<Integer> features, boolean useValues, boolean normalize) {
HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>();
double[][] featureLabelCounts = getFeatureLabelCounts(list,useValues);
for (int i = 0; i < features.size(); i++) {
int fi = features.get(i);
if (fi != list.getDataAlphabet().size()) { // skip the default (bias) feature, whose index equals the alphabet size
double[] prob = featureLabelCounts[fi];
if (normalize) {
// Smooth probability distributions by adding a (very)
// small count. We just need to make sure they aren't
// zero in which case the KL-divergence is infinite.
MatrixOps.plusEquals(prob, 1e-8);
MatrixOps.timesEquals(prob, 1./MatrixOps.sum(prob));
}
constraints.put(fi, prob);
}
}
return constraints;
}
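A hypothetical call site (trainList and the feature indices are invented for illustration; the method called is the one above):

ArrayList<Integer> features = new ArrayList<Integer>(Arrays.asList(3, 17, 42));
HashMap<Integer,double[]> targets = setTargetsUsingData(trainList, features, true, true);
// each value is now a smoothed label distribution over the target alphabet, summing to 1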
Example 5: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double [] buffer) {
if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
getValue ();
MatrixOps.setAll(cachedGradient, 0);
double[] b2 = new double[buffer.length];
for (int i = 0; i < optimizables.length; i++) {
MatrixOps.setAll(b2, 0);
optimizables[i].getValueGradient(b2);
MatrixOps.plusEquals(cachedGradient, b2);
}
cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
}
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
Developer: kostagiolasn, Project: NucleosomePatternClassifier, Lines: 15, Source: CRFOptimizableByGradientValues.java
Example 6: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double[] buffer) {
double[] b2 = new double[buffer.length];
for (Optimizable.ByGradientValue o : optimizables) {
MatrixOps.setAll(b2, 0);
o.getValueGradient(b2);
MatrixOps.plusEquals(buffer, b2);
}
}
Example 7: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double [] buffer)
{
// Gradient is (constraint - expectation - parameters/gaussianPriorVariance)
if (cachedGradientStale) {
if (cachedValueStale)
// This will fill in the cachedGradient with the "-expectation"
getValue ();
MatrixOps.plusEquals (cachedGradient, constraints);
// Incorporate prior on parameters
MatrixOps.plusEquals (cachedGradient, parameters, -1.0 / gaussianPriorVariance);
// A parameter may be set to -infinity by an external user.
// We set gradient to 0 because the parameter's value can
// never change anyway and it will mess up future calculations
// on the matrix, such as norm().
MatrixOps.substitute (cachedGradient, Double.NEGATIVE_INFINITY, 0.0);
// Set to zero all the gradient dimensions that are not among the selected features
if (perLabelFeatureSelection == null) {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0, featureSelection, false);
} else {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0,
perLabelFeatureSelection[labelIndex], false);
}
cachedGradientStale = false;
}
assert (buffer != null && buffer.length == parameters.length);
System.arraycopy (cachedGradient, 0, buffer, 0, cachedGradient.length);
}
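In this and the following MaxEnt examples, plusEquals(cachedGradient, parameters, -1.0 / gaussianPriorVariance) adds the gradient of the log Gaussian prior: for log p(theta) = -||theta||^2 / (2 * sigma^2) (up to an additive constant), the gradient is -theta / sigma^2, which is exactly the scaled accumulation performed here.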
Example 8: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double [] buffer) {
// Gradient is (constraint - expectation - parameters/gaussianPriorVariance)
if (cachedGradientStale) {
numGetValueGradientCalls++;
if (cachedValueStale)
// This will fill in the cachedGradient with the "-expectation"
getValue ();
MatrixOps.plusEquals (cachedGradient, constraints);
// Incorporate prior on parameters
if (usingHyperbolicPrior) {
throw new UnsupportedOperationException ("Hyperbolic prior not yet implemented.");
}
else if (usingGaussianPrior) {
MatrixOps.plusEquals (cachedGradient, parameters,
-1.0 / gaussianPriorVariance);
}
// A parameter may be set to -infinity by an external user.
// We set gradient to 0 because the parameter's value can
// never change anyway and it will mess up future calculations
// on the matrix, such as norm().
MatrixOps.substitute (cachedGradient, Double.NEGATIVE_INFINITY, 0.0);
// Set to zero all the gradient dimensions that are not among the selected features
if (perLabelFeatureSelection == null) {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0, featureSelection, false);
} else {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0,
perLabelFeatureSelection[labelIndex], false);
}
cachedGradientStale = false;
}
assert (buffer != null && buffer.length == parameters.length);
System.arraycopy (cachedGradient, 0, buffer, 0, cachedGradient.length);
//System.out.println ("MaxEntTrainer gradient infinity norm = "+MatrixOps.infinityNorm(cachedGradient));
}
Developer: kostagiolasn, Project: NucleosomePatternClassifier, Lines: 41, Source: MaxEntOptimizableByLabelLikelihood.java
Example 9: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double [] buffer)
{
// Gradient is (constraint - expectation - parameters/gaussianPriorVariance)
if (cachedGradientStale) {
numGetValueGradientCalls++;
if (cachedValueStale)
// This will fill in the cachedGradient with the "-expectation"
getValue ();
MatrixOps.plusEquals (cachedGradient, constraints);
// Incorporate prior on parameters
MatrixOps.plusEquals (cachedGradient, parameters,
-1.0 / gaussianPriorVariance);
// A parameter may be set to -infinity by an external user.
// We set gradient to 0 because the parameter's value can
// never change anyway and it will mess up future calculations
// on the matrix, such as norm().
MatrixOps.substitute (cachedGradient, Double.NEGATIVE_INFINITY, 0.0);
// Set to zero all the gradient dimensions that are not among the selected features
if (perLabelFeatureSelection == null) {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0, featureSelection, false);
} else {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0,
perLabelFeatureSelection[labelIndex], false);
}
cachedGradientStale = false;
}
assert (buffer != null && buffer.length == parameters.length);
System.arraycopy (cachedGradient, 0, buffer, 0, cachedGradient.length);
//System.out.println ("MaxEntTrainer gradient infinity norm = "+MatrixOps.infinityNorm(cachedGradient));
}
Developer: kostagiolasn, Project: NucleosomePatternClassifier, Lines: 37, Source: MaxEntOptimizableByLabelDistribution.java
Example 10: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double[] buffer) {
double[] b2 = new double[buffer.length];
for (ByGradientValue o : optimizables) {
MatrixOps.setAll(b2, 0);
o.getValueGradient(b2);
MatrixOps.plusEquals(buffer, b2);
}
}
Example 11: optimize
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public boolean optimize (int numIterations)
{
int iterations;
double[] params = new double[maxable.getNumParameters()];
double[] gis = new double[maxable.getNumParameters()];
double[] old_params = new double[maxable.getNumParameters()];
double[] updates = new double[maxable.getNumParameters()];
maxable.getParameters(params);
maxable.getParameters(gis);
maxable.getParameters(old_params);
for (iterations = 0; iterations < numIterations; iterations++) {
boolean complete = false;
double old = maxable.getValue();
maxable.getGISUpdate(updates);
MatrixOps.plusEquals(gis,updates);
MatrixOps.plusEquals(params,updates,eta);
maxable.setParameters(params);
double next = maxable.getValue();
// Unlike normal AGIS, fall back to GIS updates only if the
// log-likelihood got worse; if the log-likelihood improved,
// always keep the AGIS update.
if(next > old) {
complete = true;
// don't let eta get too large
if(eta*alpha < 99999999.0)
eta = eta*alpha;
}
if(backTrack && complete == false) {
// gone too far:
// unlike Roweis et al., we back-track on eta to find an
// acceptable value, instead of automatically resetting it to 1
while(eta > 1.0 && complete == false) {
eta = eta/2.0;
MatrixOps.set(params,old_params);
MatrixOps.plusEquals(params,updates,eta);
maxable.setParameters(params);
next = maxable.getValue();
if(next > old)
complete = true;
}
}
else if(complete == false) {
maxable.setParameters(gis);
eta = 1.0;
next = maxable.getValue();
}
logger.info("eta: " + eta);
if (2.0*Math.abs(next-old) <= tolerance*(Math.abs(next)+Math.abs(old)+eps)) {
converged = true;
return true;
}
if(numIterations > 1) {
maxable.getParameters(params);
maxable.getParameters(old_params);
maxable.getParameters(gis);
}
}
converged = false;
return false;
}
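The stopping test 2.0 * |next - old| <= tolerance * (|next| + |old| + eps) is a symmetric relative-change criterion: the optimizer stops when the change in value is small relative to the magnitudes of the values themselves, with eps keeping the threshold positive when both values are near zero.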
Example 12: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double [] buffer)
{
// Gradient is (constraint - expectation - parameters/gaussianPriorVariance)
if (cachedGradientStale) {
numGetValueGradientCalls++;
if (cachedValueStale)
// This will fill in the cachedGradient with the "-expectation"
getValue ();
// cachedGradient contains the negative expectations
// expectations are model expectations and constraints are
// empirical expectations
MatrixOps.plusEquals (cachedGradient, constraints);
// CPAL - we need a second copy of the constraints
// - actually, we only want this for the feature values
// - I've moved this up into getValue
//if (usingMultiConditionalTraining){
// MatrixOps.plusEquals(cachedGradient, constraints);
//}
// Incorporate prior on parameters
if (usingHyperbolicPrior) {
throw new UnsupportedOperationException ("Hyperbolic prior not yet implemented.");
}
else {
MatrixOps.plusEquals (cachedGradient, parameters,
-1.0 / gaussianPriorVariance);
}
// A parameter may be set to -infinity by an external user.
// We set gradient to 0 because the parameter's value can
// never change anyway and it will mess up future calculations
// on the matrix, such as norm().
MatrixOps.substitute (cachedGradient, Double.NEGATIVE_INFINITY, 0.0);
// Set to zero all the gradient dimensions that are not among the selected features
if (perLabelFeatureSelection == null) {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0, featureSelection, false);
} else {
for (int labelIndex = 0; labelIndex < numLabels; labelIndex++)
MatrixOps.rowSetAll (cachedGradient, numFeatures,
labelIndex, 0.0,
perLabelFeatureSelection[labelIndex], false);
}
cachedGradientStale = false;
}
assert (buffer != null && buffer.length == parameters.length);
System.arraycopy (cachedGradient, 0, buffer, 0, cachedGradient.length);
}
Example 13: labelFeatures
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Label features using heuristic described in
* "Learning from Labeled Features using Generalized Expectation Criteria"
* Gregory Druck, Gideon Mann, Andrew McCallum.
*
* @param list InstanceList used to compute statistics for labeling features.
* @param features List of features to label.
* @param reject Whether to reject (skip) features whose labeling looks unreliable.
* @return Labeled features, HashMap mapping feature indices to list of labels.
*/
public static HashMap<Integer, ArrayList<Integer>> labelFeatures(InstanceList list, ArrayList<Integer> features, boolean reject) {
HashMap<Integer,ArrayList<Integer>> labeledFeatures = new HashMap<Integer,ArrayList<Integer>>();
double[][] featureLabelCounts = getFeatureLabelCounts(list,true);
int numLabels = list.getTargetAlphabet().size();
int minRank = 100 * numLabels;
InfoGain infogain = new InfoGain(list);
double sum = 0;
for (int rank = 0; rank < minRank; rank++) {
sum += infogain.getValueAtRank(rank);
}
double mean = sum / minRank;
for (int i = 0; i < features.size(); i++) {
int fi = features.get(i);
// reject features whose information gain is
// below the cutoff (the mean over the top-ranked features)
if (reject && infogain.value(fi) < mean) {
//System.err.println("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
logger.info("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
continue;
}
double[] prob = featureLabelCounts[fi];
MatrixOps.plusEquals(prob,1e-8);
MatrixOps.timesEquals(prob, 1./MatrixOps.sum(prob));
int[] sortedIndices = getMaxIndices(prob);
ArrayList<Integer> labels = new ArrayList<Integer>();
if (numLabels > 2) {
// take anything within a factor of 2 of the best
// but no more than numLabels/2
boolean discard = false;
double threshold = prob[sortedIndices[0]] / 2;
for (int li = 0; li < numLabels; li++) {
if (prob[li] > threshold) {
labels.add(li);
}
if (reject && labels.size() > (numLabels / 2)) {
//System.err.println("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
logger.info("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
discard = true;
break;
}
}
if (discard) {
continue;
}
}
else {
labels.add(sortedIndices[0]);
}
labeledFeatures.put(fi, labels);
}
return labeledFeatures;
}
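A hypothetical call site (trainList and candidateFeatures are invented for illustration; the method called is the one above):

HashMap<Integer,ArrayList<Integer>> labeled = labelFeatures(trainList, candidateFeatures, true);
for (java.util.Map.Entry<Integer,ArrayList<Integer>> e : labeled.entrySet()) {
    System.out.println(trainList.getDataAlphabet().lookupObject(e.getKey())
        + " -> label indices " + e.getValue());
}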