本文整理汇总了Java中cc.mallet.types.MatrixOps类的典型用法代码示例。如果您正苦于以下问题:Java MatrixOps类的具体用法?Java MatrixOps怎么用?Java MatrixOps使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
MatrixOps类属于cc.mallet.types包,在下文中一共展示了MatrixOps类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: diag
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Builds a sparse matrix of the given shape whose diagonal entries all
 * equal {@code v} and whose off-diagonal entries are zero.
 *
 * NOTE(review): the diagonal length is taken as the MAX dimension size,
 * which presumes a hypercube shape — confirm callers never pass ragged sizes.
 *
 * @param sizes dimension sizes of the result.
 * @param v value to place on the diagonal.
 * @return a {@link SparseMatrixn} with v on its main diagonal.
 */
public static Matrix diag (int[] sizes, double v)
{
  int diagLen = MatrixOps.max (sizes);
  double[] diagVals = new double[diagLen];
  Arrays.fill (diagVals, v);
  // Translate each multi-index (i, i, ..., i) into its flat single index.
  int[] flatIdxs = new int[diagLen];
  int[] coords = new int[sizes.length];
  for (int i = 0; i < diagLen; i++) {
    Arrays.fill (coords, i);
    flatIdxs[i] = Matrixn.singleIndex (sizes, coords);
  }
  return new SparseMatrixn (sizes, flatIdxs, diagVals);
}
示例2: testSample
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Draws repeatedly from an unnormalized table factor and checks that the
 * empirical frequency of each outcome approaches its normalized weight.
 */
public void testSample ()
{
  Variable v = new Variable (3);
  double[] weights = new double[] { 1, 3, 2 };
  TableFactor factor = new TableFactor (v, weights);
  Randoms rand = new Randoms (32423);
  int[] draws = new int[100];
  for (int s = 0; s < draws.length; s++) {
    draws[s] = factor.sampleLocation (rand);
  }
  double total = MatrixOps.sum (weights);
  // Count how often each outcome was drawn.
  double[] freq = new double[weights.length];
  for (int k = 0; k < weights.length; k++) {
    freq[k] = ArrayUtils.count (draws, k);
  }
  MatrixOps.print (freq);
  // Empirical proportion should be within 0.1 of the true probability.
  for (int k = 0; k < weights.length; k++) {
    double observed = freq[k] / ((double) draws.length);
    assertEquals (weights[k] / total, observed, 0.1);
  }
}
示例3: testSample
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Samples 10000 draws from a univariate normal factor with mean -1 and
 * variance 2, then checks that the sample mean and standard deviation
 * match the factor's parameters.
 */
public void testSample ()
{
  Variable var = new Variable (Variable.CONTINUOUS);
  Randoms rng = new Randoms (2343);
  Factor gaussian = new UniNormalFactor (var, -1.0, 2.0);
  TDoubleArrayList samples = new TDoubleArrayList ();
  for (int draw = 0; draw < 10000; draw++) {
    Assignment assn = gaussian.sample (rng);
    samples.add (assn.getDouble (var));
  }
  double[] vals = samples.toNativeArray ();
  // Sample stddev should approach sqrt(variance).
  assertEquals (-1.0, MatrixOps.mean (vals), 0.025);
  assertEquals (Math.sqrt (2.0), MatrixOps.stddev (vals), 0.01);
}
示例4: testContinousSample
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Reads a uniform model from a string, samples it 10000 times, and checks
 * that the empirical mean of x1 is close to the expected 0.5.
 *
 * @throws IOException if the model string cannot be parsed.
 */
public void testContinousSample () throws IOException
{
  ModelReader reader = new ModelReader ();
  FactorGraph fg = reader.readModel (new BufferedReader (new StringReader (uniformMdlstr)));
  Randoms rand = new Randoms (324143);
  Assignment samples = new Assignment ();
  for (int draw = 0; draw < 10000; draw++) {
    samples.addRow (fg.sample (rand));
  }
  Variable x1 = fg.findVariable ("x1");
  Assignment marginal = (Assignment) samples.marginalize (x1);
  int[] column = marginal.getColumnInt (x1);
  double empiricalMean = MatrixOps.sum (column) / ((double) column.length);
  assertEquals (0.5, empiricalMean, 0.025);
}
示例5: testContinousSample2
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Samples the second uniform model 10000 times and verifies that the
 * empirical mean of each of x1 and x2 is close to the expected 0.5.
 *
 * @throws IOException if the model string cannot be parsed.
 */
public void testContinousSample2 () throws IOException
{
  ModelReader reader = new ModelReader ();
  FactorGraph fg = reader.readModel (new BufferedReader (new StringReader (uniformMdlstr2)));
  Randoms r = new Randoms (324143);
  Assignment allAssn = new Assignment ();
  for (int i = 0; i < 10000; i++) {
    Assignment row = fg.sample (r);
    allAssn.addRow (row);
  }
  // BUG FIX: this lookup previously used "x2" as well, so the first check
  // silently duplicated the second one and x1 was never actually tested.
  Variable x1 = fg.findVariable ("x1");
  Assignment assn1 = (Assignment) allAssn.marginalize (x1);
  int[] col = assn1.getColumnInt (x1);
  double mean = MatrixOps.sum (col) / ((double) col.length);
  assertEquals (0.5, mean, 0.01);
  Variable x2 = fg.findVariable ("x2");
  Assignment assn2 = (Assignment) allAssn.marginalize (x2);
  int[] col2 = assn2.getColumnInt (x2);
  double mean2 = MatrixOps.sum (col2) / ((double) col2.length);
  assertEquals (0.5, mean2, 0.025);
}
示例6: addState
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Adds a new state to the machine, growing the initial/final weight vectors
 * and registering the state under its (unique) name.
 *
 * @param name unique name of the new state.
 * @param initialWeight weight for starting a sequence in this state.
 * @param finalWeight weight for ending a sequence in this state.
 * @param destinationNames names of states reachable from this one.
 * @param labelNames output labels for the corresponding transitions.
 * @param weightNames weight-set names for the corresponding transitions.
 * @throws IllegalArgumentException if a state with this name already exists.
 */
public void addState (String name, double initialWeight, double finalWeight,
                      String[] destinationNames,
                      String[] labelNames,
                      String[][] weightNames)
{
  // Each transition needs a parallel label and weight-name entry.
  assert (weightNames.length == destinationNames.length);
  assert (labelNames.length == destinationNames.length);
  weightsStructureChanged();
  if (name2state.get(name) != null)
    throw new IllegalArgumentException ("State with name `"+name+"' already exists.");
  parameters.initialWeights = MatrixOps.append(parameters.initialWeights, initialWeight);
  parameters.finalWeights = MatrixOps.append(parameters.finalWeights, finalWeight);
  State s = newState (name, states.size(), initialWeight, finalWeight,
                      destinationNames, labelNames, weightNames, this);
  // FIX: removed leftover debug call s.print(), which dumped every newly
  // added state to stdout.
  states.add (s);
  // Only states that can actually begin a sequence are tracked as initial.
  if (initialWeight > IMPOSSIBLE_WEIGHT)
    initialStates.add (s);
  name2state.put (name, s);
}
示例7: getValue
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Returns the cached total log-likelihood, recomputing the per-batch
 * contributions in parallel only when the cache is stale.
 *
 * @return sum of the per-batch cached log-likelihood values.
 */
public double getValue () {
  if (cacheIndicator.isValueStale()) {
    try {
      // Run all value tasks concurrently and block until every batch is done.
      List<Future<Double>> results = executor.invokeAll(valueTasks);
      int batch = 0;
      for (Future<Double> f : results) {
        try {
          batchCachedValue[batch++] = f.get();
        } catch (ExecutionException ee) {
          // NOTE(review): a failed batch keeps its previous cached value;
          // consider propagating instead of only logging.
          ee.printStackTrace();
        }
      }
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      // FIX: restore the interrupt status so callers can observe it.
      Thread.currentThread().interrupt();
    }
    logger.info("getValue() (loglikelihood, optimizable by label likelihood) =" + MatrixOps.sum(batchCachedValue));
  }
  // FIX: single exit point — the sum was previously computed and returned
  // from two separate paths.
  return MatrixOps.sum(batchCachedValue);
}
示例8: getValueGradient
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Copies the (cached) value gradient into {@code buffer}, recomputing it
 * only when the CRF weights have changed since the last computation.
 *
 * PriorGradient is -parameter/gaussianPriorVariance.
 * Gradient is (constraint - expectation + PriorGradient)
 * == -(expectation - constraint - PriorGradient).
 * Gradient points "up-hill", i.e. in the direction of higher value.
 *
 * @param buffer receives the gradient; must be at least as long as the cache.
 */
public void getValueGradient (double [] buffer)
{
  boolean stale = (cachedGradientWeightsStamp != crf.weightsValueChangeStamp);
  if (stale) {
    // Ensure expectations are up to date before accumulating gradients.
    getValue ();
    MatrixOps.setAll (cachedGradie, 0);
    double[] scratch = new double[buffer.length];
    // Sum the per-optimizable gradients into the cache.
    for (int k = 0; k < opts.length; k++) {
      MatrixOps.setAll (scratch, 0);
      opts[k].getValueGradient (scratch);
      MatrixOps.plusEquals (cachedGradie, scratch);
    }
    cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
  }
  System.arraycopy (cachedGradie, 0, buffer, 0, cachedGradie.length);
}
示例9: computeHessianProduct
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Approximates the Hessian-vector product H*v for one batch by a finite
 * difference of gradients: the parameters are perturbed by eps*vector, the
 * batch gradient is recomputed, and the difference is divided by eps.
 *
 * Side effect: temporarily mutates {@code parameters} via
 * {@code maxable.setParameters}, then restores the saved copy.
 *
 * @param maxable optimizable supplying batch gradients.
 * @param parameters current parameter vector (perturbed in place, restored).
 * @param batchIndex index of the batch to evaluate.
 * @param batchAssignments instance-to-batch assignments.
 * @param currentGradient gradient at the unperturbed parameters.
 * @param vector direction v to multiply the Hessian by.
 * @param result receives the approximate H*v.
 */
private void computeHessianProduct(Optimizable.ByBatchGradient maxable,
double[] parameters, int batchIndex, int[] batchAssignments,
double[] currentGradient, double[] vector, double[] result) {
int numParameters = maxable.getNumParameters();
// Finite-difference step size.
double eps = 1.0e-6;
double[] epsGradient = new double[numParameters];
double[] oldParameters = new double[numParameters];
// adjust parameters by (eps * vector) and recompute gradient
System.arraycopy(parameters,0,oldParameters,0,numParameters);
MatrixOps.plusEquals(parameters, vector, eps);
maxable.setParameters(parameters);
maxable.getBatchValueGradient(epsGradient, batchIndex, batchAssignments);
// restore old parameters
maxable.setParameters(oldParameters);
// calculate Hessian product
// NOTE(review): the sign pattern (-epsGradient - currentGradient) / eps
// matches (g(x+eps*v) - g(x)) / eps only if currentGradient is stored
// negated relative to epsGradient — confirm against the caller.
for (int index = 0; index < result.length; index++) {
result[index] = (-epsGradient[index] - currentGradient[index]) / eps;
}
}
示例10: mapDirByInverseHessian
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Adjusts direction based on approximate hessian inverse.
 *
 * Implements the standard L-BFGS two-loop recursion in place on
 * {@code direction}, using the stored parameter differences {@code s} and
 * gradient differences {@code y}.
 * NOTE(review): {@code rhos} appears to hold s_i^T y_i (it is divided by,
 * not multiplied as in some formulations) — confirm where it is populated.
 *
 * @param yDotY
 * y^T * y in BFGS calculation.
 */
private void mapDirByInverseHessian(double yDotY) {
// No curvature pairs collected yet: leave direction unchanged.
if (s.size() == 0)
return;
int count = s.size();
// First loop (newest to oldest): alpha_i = (s_i . q) / (s_i . y_i),
// q -= alpha_i * y_i. alphas are stored negated, so plusEquals applies
// the subtraction.
for (int i = count - 1; i >= 0; i--) {
alphas[i] = -MatrixOps.dotProduct(s.get(i), direction)
/ rhos.get(i);
MatrixOps.plusEquals(direction, y.get(i), alphas[i]);
}
// Scale by the usual initial inverse-Hessian estimate (s.y) / (y.y).
double scalar = rhos.get(count - 1) / yDotY;
logger.fine("Direction multiplier = " + scalar);
MatrixOps.timesEquals(direction, scalar);
// Second loop (oldest to newest): beta = (y_i . q) / (s_i . y_i),
// q += (alpha_i - beta) * s_i; with negated alphas this is -alphas[i] - beta.
for (int i = 0; i < count; i++) {
double beta = MatrixOps.dotProduct(y.get(i), direction)
/ rhos.get(i);
MatrixOps.plusEquals(direction, s.get(i), -alphas[i] - beta);
}
}
示例11: dirDeriv
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Computes the directional derivative of the L1-regularized objective along
 * {@code direction}, using the OWL-QN pseudo-gradient: the L1 subgradient
 * takes the sign of the parameter when nonzero, otherwise the sign of the
 * step direction (the orthant being entered).
 *
 * @return the directional derivative; just grad . direction when l1Weight == 0.
 */
private double dirDeriv() {
  if (l1Weight == 0) {
    return MatrixOps.dotProduct(direction, grad);
  }
  double deriv = 0.0;
  for (int i = 0; i < direction.length; i++) {
    double d = direction[i];
    if (d == 0) {
      continue;
    }
    // Fold the four original branches into a single signed L1 contribution.
    double sign = parameters[i] < 0 ? -1.0
        : parameters[i] > 0 ? 1.0
        : d < 0 ? -1.0
        : d > 0 ? 1.0 : 0.0;
    if (sign != 0) {
      deriv += d * (grad[i] + sign * l1Weight);
    }
  }
  return deriv;
}
示例12: getUnnormalizedClassificationScores
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Fills {@code scores} with the raw (unnormalized, un-exponentiated)
 * per-label score for {@code instance}: the label's default-feature weight
 * plus the dot product of its weight row with the instance's features.
 *
 * @param instance instance whose data is a {@link FeatureVector}.
 * @param scores output array of length numLabels.
 */
public void getUnnormalizedClassificationScores (Instance instance, double[] scores)
{
  // arrayOutOfBounds if pipe has grown since training
  // int numFeatures = getAlphabet().size() + 1;
  int numFeatures = this.defaultFeatureIndex + 1;
  int numLabels = getLabelAlphabet().size();
  assert (scores.length == numLabels);
  // The feature vector must share the data alphabet of the training pipe,
  // otherwise weight indices would be misaligned.
  FeatureVector fv = (FeatureVector) instance.getData ();
  assert (fv.getAlphabet () == this.instancePipe.getDataAlphabet ());
  for (int li = 0; li < numLabels; li++) {
    double bias = parameters[li * numFeatures + defaultFeatureIndex];
    scores[li] = bias
        + MatrixOps.rowDotProduct (parameters, numFeatures,
            li, fv,
            defaultFeatureIndex,
            (perClassFeatureSelection != null
                ? perClassFeatureSelection[li]
                : featureSelection));
  }
}
示例13: setTargetsUsingData
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Set target distributions using estimates from data.
 *
 * @param list InstanceList used to estimate targets.
 * @param features List of features for constraints.
 * @param useValues Whether to accumulate feature values rather than
 *        binary presence counts when estimating the targets.
 * @param normalize Whether to normalize by feature counts
 * @return Constraints (map of feature index to target), with targets
 *         set using estimates from supplied data.
 */
public static HashMap<Integer,double[]> setTargetsUsingData(InstanceList list, ArrayList<Integer> features, boolean useValues, boolean normalize) {
  HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>();
  double[][] featureLabelCounts = getFeatureLabelCounts(list,useValues);
  // FIX: hoisted the loop-invariant alphabet-size lookup out of the loop.
  // The index equal to the alphabet size is the default (bias) feature,
  // which carries no target distribution and is skipped.
  int defaultFeatureIndex = list.getDataAlphabet().size();
  for (int i = 0; i < features.size(); i++) {
    int fi = features.get(i);
    if (fi != defaultFeatureIndex) {
      double[] prob = featureLabelCounts[fi];
      if (normalize) {
        // Smooth probability distributions by adding a (very)
        // small count. We just need to make sure they aren't
        // zero in which case the KL-divergence is infinite.
        MatrixOps.plusEquals(prob, 1e-8);
        MatrixOps.timesEquals(prob, 1./MatrixOps.sum(prob));
      }
      constraints.put(fi, prob);
    }
  }
  return constraints;
}
示例14: getUnnormalizedClassificationScores
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Computes raw per-label scores for {@code instance}: for each label, the
 * default-feature (bias) weight plus the weighted sum of active features.
 * No normalization or exponentiation is applied.
 *
 * @param instance instance whose data is a {@link FeatureVector}.
 * @param scores output array, one slot per label.
 */
public void getUnnormalizedClassificationScores (Instance instance, double[] scores)
{
  // arrayOutOfBounds if pipe has grown since training
  // int numFeatures = getAlphabet().size() + 1;
  int featureCount = this.defaultFeatureIndex + 1;
  int labelCount = getLabelAlphabet().size();
  assert (scores.length == labelCount);
  FeatureVector input = (FeatureVector) instance.getData ();
  // Guard against an instance built with a different data alphabet than the
  // one the classifier was trained on.
  assert (input.getAlphabet () == this.instancePipe.getDataAlphabet ());
  for (int label = 0; label < labelCount; label++) {
    Object selection = null; // placeholder comment removed; see ternary below
    scores[label] = parameters[label * featureCount + defaultFeatureIndex]
        + MatrixOps.rowDotProduct (parameters, featureCount,
            label, input,
            defaultFeatureIndex,
            (perClassFeatureSelection == null
                ? featureSelection
                : perClassFeatureSelection[label]));
  }
}
示例15: getClassificationScoresWithTemperature
import cc.mallet.types.MatrixOps; //导入依赖的package包/类
/**
 * Fills {@code scores} with a temperature-scaled softmax over the raw
 * per-label scores: higher temperature flattens the distribution, lower
 * temperature sharpens it.
 *
 * @param instance instance to classify.
 * @param temperature divisor applied to the raw scores before exp().
 * @param scores output array; receives a normalized distribution.
 */
public void getClassificationScoresWithTemperature (Instance instance, double temperature, double[] scores)
{
  getUnnormalizedClassificationScores (instance, scores);
  // Scores are sums of weighted features; dividing by the temperature
  // rescales them before the softmax.
  MatrixOps.timesEquals (scores, 1 / temperature);
  int numLabels = getLabelAlphabet ().size ();
  // Subtract the max before exponentiating so exp() stays in an accurate
  // range, then normalize to a proper probability distribution.
  double max = MatrixOps.max (scores);
  double normalizer = 0;
  for (int li = 0; li < numLabels; li++) {
    scores[li] = Math.exp (scores[li] - max);
    normalizer += scores[li];
  }
  for (int li = 0; li < numLabels; li++) {
    scores[li] /= normalizer;
    // xxxNaN assert (!Double.isNaN(scores[li]));
  }
}