This page collects representative usage examples of the Java method cc.mallet.types.MatrixOps.timesEquals. If you are wondering what MatrixOps.timesEquals does, how to use it, or where to find example code, the curated samples below may help. You can also read further about the containing class, cc.mallet.types.MatrixOps.
The following 12 code examples of MatrixOps.timesEquals are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
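Before the examples, note the method's basic contract, visible throughout the snippets below: MatrixOps.timesEquals takes a double[] and a scalar, multiplies every element in place, and returns nothing. A minimal sketch (the class and variable names here are illustrative):

import cc.mallet.types.MatrixOps;
import java.util.Arrays;

public class TimesEqualsDemo {
    public static void main(String[] args) {
        double[] v = {1.0, 2.0, 4.0};
        // Scales every element of v in place; the array itself is mutated.
        MatrixOps.timesEquals(v, 0.5);
        System.out.println(Arrays.toString(v)); // [0.5, 1.0, 2.0]
    }
}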
Example 1: mapDirByInverseHessian
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Adjusts the search direction using the approximate inverse Hessian
* (the L-BFGS two-loop recursion).
*
* @param yDotY
* y^T * y for the most recent y vector in the BFGS update.
*/
private void mapDirByInverseHessian(double yDotY) {
if (s.size() == 0)
return;
int count = s.size();
for (int i = count - 1; i >= 0; i--) {
alphas[i] = -MatrixOps.dotProduct(s.get(i), direction)
/ rhos.get(i);
MatrixOps.plusEquals(direction, y.get(i), alphas[i]);
}
double scalar = rhos.get(count - 1) / yDotY;
logger.fine("Direction multiplier = " + scalar);
MatrixOps.timesEquals(direction, scalar);
for (int i = 0; i < count; i++) {
double beta = MatrixOps.dotProduct(y.get(i), direction)
/ rhos.get(i);
MatrixOps.plusEquals(direction, s.get(i), -alphas[i] - beta);
}
}
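The timesEquals call above is the L-BFGS initial-Hessian scaling step: assuming rhos stores the s·y dot products (as in the OWL-QN-style implementations this code resembles), the scalar equals gamma = (s^T y)/(y^T y) for the most recent pair. The same step in isolation, as a sketch with illustrative names:

// Approximate the initial Hessian as gamma * I and scale the direction by gamma.
static void scaleByGamma(double[] direction, double[] s, double[] y) {
    double gamma = MatrixOps.dotProduct(s, y) / MatrixOps.dotProduct(y, y);
    MatrixOps.timesEquals(direction, gamma);
}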
Example 2: setTargetsUsingData
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Set target distributions using estimates from data.
*
* @param list InstanceList used to estimate targets.
* @param features List of features for constraints.
* @param useValues Whether to use feature values (rather than binary presence) when counting.
* @param normalize Whether to normalize by feature counts.
* @return Constraints (map of feature index to target), with targets
* set using estimates from supplied data.
*/
public static HashMap<Integer,double[]> setTargetsUsingData(InstanceList list, ArrayList<Integer> features, boolean useValues, boolean normalize) {
HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>();
double[][] featureLabelCounts = getFeatureLabelCounts(list,useValues);
for (int i = 0; i < features.size(); i++) {
int fi = features.get(i);
if (fi != list.getDataAlphabet().size()) {
double[] prob = featureLabelCounts[fi];
if (normalize) {
// Smooth probability distributions by adding a (very)
// small count. We just need to make sure they aren't
// zero in which case the KL-divergence is infinite.
MatrixOps.plusEquals(prob, 1e-8);
MatrixOps.timesEquals(prob, 1./MatrixOps.sum(prob));
}
constraints.put(fi, prob);
}
}
return constraints;
}
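The plusEquals/timesEquals pair above is a common smooth-then-normalize idiom: add a tiny count so no entry is exactly zero, then rescale so the entries sum to 1. A minimal sketch with a hypothetical helper name:

// Turn raw counts into a smoothed probability distribution, in place.
static double[] toSmoothedDistribution(double[] counts) {
    MatrixOps.plusEquals(counts, 1e-8);                         // no zeros, so KL-divergence stays finite
    MatrixOps.timesEquals(counts, 1.0 / MatrixOps.sum(counts)); // entries now sum to 1
    return counts;
}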
Example 3: getClassificationScoresWithTemperature
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getClassificationScoresWithTemperature (Instance instance, double temperature, double[] scores)
{
getUnnormalizedClassificationScores(instance, scores);
// Divide the scores (sums of weighted features) by the temperature.
MatrixOps.timesEquals(scores, 1/temperature);
// Move scores to a range where exp() is accurate, and normalize
int numLabels = getLabelAlphabet().size();
double max = MatrixOps.max (scores);
double sum = 0;
for (int li = 0; li < numLabels; li++)
sum += (scores[li] = Math.exp (scores[li] - max));
for (int li = 0; li < numLabels; li++) {
scores[li] /= sum;
// xxxNaN assert (!Double.isNaN(scores[li]));
}
}
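Dividing the scores by the temperature before exponentiating controls how peaked the result is: T > 1 flattens the distribution toward uniform, T < 1 sharpens it toward the argmax. The same trick on a plain array, as a minimal sketch (the helper name is illustrative):

static void softmaxWithTemperature(double[] scores, double temperature) {
    MatrixOps.timesEquals(scores, 1.0 / temperature); // T > 1 flattens, T < 1 sharpens
    double max = MatrixOps.max(scores);               // shift so exp() stays accurate
    double sum = 0;
    for (int i = 0; i < scores.length; i++)
        sum += (scores[i] = Math.exp(scores[i] - max));
    MatrixOps.timesEquals(scores, 1.0 / sum);         // normalize to a distribution
}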
Example 4: rainbow
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Returns a list of hex color names of length n.
* Colors are generated by equally-spaced hues in HSB space.
* @param n Number of "equally-spaced" colors to return
* @param s Saturation of generated colors
* @param b Brightness
* @return An array of hex color names, e.g., "#0033FF"
*/
public static String[] rainbow (int n, float s, float b)
{
double[] vals = new double[n];
for (int i = 0; i < n; i++) vals[i] = i;
MatrixOps.timesEquals (vals, 1.0/n);
String[] ret = new String[n];
for (int i = 0; i < n; i++) {
int rgb = Color.HSBtoRGB ((float) vals[i], s, b);
Color color = new Color (rgb);
ret[i] = colorToHexString (color);
}
return ret;
}
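A hypothetical call to the method above:

// Six evenly spaced hues at full saturation and brightness; the first entry
// is red (hue 0.0) and each subsequent hue advances by 1/6.
String[] palette = rainbow(6, 1.0f, 1.0f);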
Example 5: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient(double[] buffer) {
if (cachedGradientWeightsStamp != crf.getWeightsValueChangeStamp()) {
cachedGradientWeightsStamp = crf.getWeightsValueChangeStamp();
getValue();
expectations.plusEquals(constraints, -1.0);
expectations.plusEqualsGaussianPriorGradient(crf.getParameters(), -gaussianPriorVariance);
expectations.assertNotNaNOrInfinite();
expectations.getParameters(cachedGradient);
MatrixOps.timesEquals(cachedGradient, -weight);
}
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
Example 6: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient(double[] buffer) {
if (crf.getWeightsValueChangeStamp() != cache) {
cacheValueAndGradient();
cache = crf.getWeightsValueChangeStamp();
}
// TODO this will also multiply the prior, if active!
cachedGradient.getParameters(buffer);
if (weight != 1) {
MatrixOps.timesEquals(buffer, weight);
}
}
Example 7: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient (double [] buffer)
{
// PriorGradient is -parameter/gaussianPriorVariance
// Gradient is (constraint - expectation + PriorGradient)
// == -(expectation - constraint - PriorGradient).
// Gradient points "up-hill", i.e. in the direction of higher value
if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
cachedGradientWeightsStamp = crf.weightsValueChangeStamp; // cachedGradient will soon no longer be stale
// This will fill in the this.expectation, updating it if necessary
getValue ();
assertNotNaNOrInfinite();
// Gradient is constraints - expectations + prior. We do this by -(expectations - constraints - prior).
expectations.plusEquals(constraints, -1.0);
if (usingHyperbolicPrior)
expectations.plusEqualsHyperbolicPriorGradient(crf.parameters, -hyperbolicPriorSlope, hyperbolicPriorSharpness);
else
expectations.plusEqualsGaussianPriorGradient(crf.parameters, -gaussianPriorVariance);
expectations.assertNotNaNOrInfinite();
expectations.getParameters(cachedGradient);
MatrixOps.timesEquals (cachedGradient, -1.0); // This implements the -(...) in the above comment
// xxx Show the feature with maximum gradient
// TODO Is something like this negation still necessary?????
// up to now we've been calculating the weightGradient.
// take the opposite to get the valueGradient
//cachedGradient.timesEquals (-1.0); // point uphill
}
// What the heck was this!?: if (buffer.length != this.numParameters) buffer = new double[this.numParameters];
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
//Arrays.fill (buffer, 0.0);
//System.arraycopy(cachedGradie, 0, buffer, 0, 2*crf.parameters.initialWeights.length); // TODO For now, just copy the state inital/final weights
}
Author: kostagiolasn; project: NucleosomePatternClassifier; lines: 36; source file: CRFOptimizableByLabelLikelihood.java
Example 8: optimize
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public boolean optimize (int numIterations)
{
int iterations;
double fret;
double fp = optimizable.getValue ();
double[] xi = new double [optimizable.getNumParameters()];
optimizable.getValueGradient(xi);
for (iterations = 0; iterations < numIterations; iterations++) {
logger.info ("At iteration "+iterations+", cost = "+fp+", scaled = "+maxStep+" step = "+step+", gradient infty-norm = "+MatrixOps.infinityNorm (xi));
// Ensure step not too large
double sum = MatrixOps.twoNorm (xi);
if (sum > stpmax) {
logger.info ("*** Step 2-norm "+sum+" greater than max "+stpmax+" Scaling...");
MatrixOps.timesEquals (xi,stpmax/sum);
}
step = lineMaximizer.optimize (xi, step);
fret = optimizable.getValue ();
if (2.0*Math.abs(fret-fp) <= tolerance*(Math.abs(fret)+Math.abs(fp)+eps)) {
logger.info ("Gradient Ascent: Value difference "+Math.abs(fret-fp)+" below " +
"tolerance; saying converged.");
converged = true;
return true;
}
fp = fret;
optimizable.getValueGradient(xi);
if (eval != null) {
eval.evaluate (optimizable, iterations);
}
}
return false;
}
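The step-size guard in the loop above is a general 2-norm clipping idiom. Isolated, using the same MatrixOps calls (the helper name is illustrative):

// Rescale a vector in place whenever its 2-norm exceeds a maximum step size.
static void clipToMaxNorm(double[] g, double maxStep) {
    double norm = MatrixOps.twoNorm(g);
    if (norm > maxStep)
        MatrixOps.timesEquals(g, maxStep / norm);
}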
Example 9: classify
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Classifies an instance using BalancedWinnow's weights.
*
* <p>Returns a Classification containing the normalized
* dot products between class weight vectors and the instance
* feature vector.
*
* <p>One can obtain the confidence of the classification by
* calculating weight(j')/weight(j), where j' is the
* highest-weight prediction and j is the 2nd-highest.
* Another possibility is to calculate
* <br><tt><center>e^{dot(w_j', x)} / sum_j[e^{dot(w_j, x)}]</center></tt>
*/
public Classification classify (Instance instance)
{
int numClasses = getLabelAlphabet().size();
int numFeats = getAlphabet().size();
double[] scores = new double[numClasses];
FeatureVector fv = (FeatureVector) instance.getData ();
// Make sure the feature vector's feature dictionary matches
// what we are expecting from our data pipe (and thus our notion
// of feature probabilities).
assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ());
int fvisize = fv.numLocations();
// Take dot products
double sum = 0;
for (int ci = 0; ci < numClasses; ci++) {
for (int fvi = 0; fvi < fvisize; fvi++) {
int fi = fv.indexAtLocation (fvi);
double vi = fv.valueAtLocation(fvi);
if ( m_weights[ci].length > fi ) {
scores[ci] += vi * m_weights[ci][fi];
sum += vi * m_weights[ci][fi];
}
}
scores[ci] += m_weights[ci][numFeats];
sum += m_weights[ci][numFeats];
}
MatrixOps.timesEquals(scores, 1.0 / sum);
// Create and return a Classification object
return new Classification (instance, this, new LabelVector (getLabelAlphabet(), scores));
}
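The confidence heuristic described in the Javadoc can be computed from the normalized scores; a minimal sketch with illustrative names:

// Ratio of the best score to the runner-up; larger means more confident.
static double confidence(double[] normalizedScores) {
    double best = Double.NEGATIVE_INFINITY, second = Double.NEGATIVE_INFINITY;
    for (double s : normalizedScores) {
        if (s > best) { second = best; best = s; }
        else if (s > second) { second = s; }
    }
    return best / second;
}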
Example 10: getValueGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
public void getValueGradient(double[] buffer) {
// PriorGradient is -parameter/gaussianPriorVariance
// Gradient is (constraint - expectation + PriorGradient)
// == -(expectation - constraint - PriorGradient).
// Gradient points "up-hill", i.e. in the direction of higher value
if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
getValue();
assertNotNaNOrInfinite();
// Gradient is constraints - expectations - distance. We do this by
// -(expectations - constraints + distance).
expectations.plusEquals(constraints, -1.0);
expectations.plusEquals(distances, lambda);
// we keep weights for invalid features being zeros by setting their
// gradients to zeros
// setInvalidToZero(expectations.weights);
expectations.assertNotNaNOrInfinite();
expectations.getParameters(cachedGradient);
MatrixOps.timesEquals(cachedGradient, -1.0);
}
// What the heck was this!?: if (buffer.length != this.numParameters)
// buffer = new double[this.numParameters];
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
// Arrays.fill (buffer, 0.0);
// System.arraycopy(cachedGradie, 0, buffer, 0,
// 2*crf.parameters.initialWeights.length); // TODO For now, just copy
// the state inital/final weights
}
Example 11: evalGradient
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Evaluate gradient, make it a descent direction.
*/
private void evalGradient() {
optimizable.getValueGradient(grad);
adjustGradForInfiniteParams(grad);
MatrixOps.timesEquals(grad, -1.0);
}
Example 12: labelFeatures
import cc.mallet.types.MatrixOps; // import the package/class this method depends on
/**
* Label features using the heuristic described in
* "Learning from Labeled Features using Generalized Expectation Criteria"
* Gregory Druck, Gideon Mann, Andrew McCallum.
*
* @param list InstanceList used to compute statistics for labeling features.
* @param features List of features to label.
* @param reject Whether to reject features whose information gain is below the
* mean of the top-ranked features, or whose labelings are too ambiguous.
* @return Labeled features, HashMap mapping feature indices to lists of labels.
*/
public static HashMap<Integer, ArrayList<Integer>> labelFeatures(InstanceList list, ArrayList<Integer> features, boolean reject) {
HashMap<Integer,ArrayList<Integer>> labeledFeatures = new HashMap<Integer,ArrayList<Integer>>();
double[][] featureLabelCounts = getFeatureLabelCounts(list,true);
int numLabels = list.getTargetAlphabet().size();
int minRank = 100 * numLabels;
InfoGain infogain = new InfoGain(list);
double sum = 0;
for (int rank = 0; rank < minRank; rank++) {
sum += infogain.getValueAtRank(rank);
}
double mean = sum / minRank;
for (int i = 0; i < features.size(); i++) {
int fi = features.get(i);
// reject features with infogain
// less than cutoff
if (reject && infogain.value(fi) < mean) {
//System.err.println("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
logger.info("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
continue;
}
double[] prob = featureLabelCounts[fi];
MatrixOps.plusEquals(prob,1e-8);
MatrixOps.timesEquals(prob, 1./MatrixOps.sum(prob));
int[] sortedIndices = getMaxIndices(prob);
ArrayList<Integer> labels = new ArrayList<Integer>();
if (numLabels > 2) {
// take anything within a factor of 2 of the best
// but no more than numLabels/2
boolean discard = false;
double threshold = prob[sortedIndices[0]] / 2;
for (int li = 0; li < numLabels; li++) {
if (prob[li] > threshold) {
labels.add(li);
}
if (reject && labels.size() > (numLabels / 2)) {
//System.err.println("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
logger.info("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi));
discard = true;
break;
}
}
if (discard) {
continue;
}
}
else {
labels.add(sortedIndices[0]);
}
labeledFeatures.put(fi, labels);
}
return labeledFeatures;
}
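A hedged usage sketch for the heuristic above, assuming an InstanceList named list already exists:

// Hypothetical setup: offer every feature in the alphabet as a candidate and
// let the heuristic reject weakly informative or ambiguous ones.
ArrayList<Integer> candidates = new ArrayList<Integer>();
for (int fi = 0; fi < list.getDataAlphabet().size(); fi++)
    candidates.add(fi);
HashMap<Integer, ArrayList<Integer>> labeled = labelFeatures(list, candidates, true);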