本文整理汇总了Java中gov.sandia.cognition.math.matrix.Vector类的典型用法代码示例。如果您正苦于以下问题:Java Vector类的具体用法?Java Vector怎么用?Java Vector使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Vector类属于gov.sandia.cognition.math.matrix包,在下文中一共展示了Vector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: HandWritingNeuralNetSANDIA
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Loads the X input and y output matrices from {@link #INPUT_LOCATION} and
 * {@link #OUTPUT_LOCATION}, then prepares the data, trains the network, and
 * tests it.
 *
 * @throws IOException
 *             if either CSV resource cannot be read
 */
public HandWritingNeuralNetSANDIA() throws IOException {
    // Fix: the original never closed either reader, leaking both streams.
    // Nested try/finally keeps this compatible with pre-Java-7 sources.
    // NOTE(review): InputStreamReader uses the platform default charset here,
    // as the original did — confirm the CSV resources are ASCII/platform-safe.
    final BufferedReader xReader = new BufferedReader(new InputStreamReader(
            HandWritingNeuralNetSANDIA.class.getResourceAsStream(INPUT_LOCATION)));
    try {
        final BufferedReader yReader = new BufferedReader(new InputStreamReader(
                HandWritingNeuralNetSANDIA.class.getResourceAsStream(OUTPUT_LOCATION)));
        try {
            this.xVals = fromCSV(xReader, 5000);
            this.yVals = fromCSV(yReader, 5000);
        } finally {
            yReader.close();
        }
    } finally {
        xReader.close();
    }
    examples = new TIntIntHashMap();
    this.tests = new TIntObjectHashMap<List<IndependentPair<Vector, Vector>>>();
    prepareDataCollection();
    learnNeuralNet();
    testNeuralNet();
    // new HandWritingInputDisplay(xVals);
}
示例2: getAnnotations
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
// Scores every category of the naive-Bayes categorizer for the given feature
// vector and converts the log-posteriors into normalised probabilities.
// NOTE(review): the signature is truncated in this excerpt — modifiers and the
// return type (presumably List<ScoredAnnotation<ANNOTATION>>) are not visible.
getAnnotations(VectorNaiveBayesCategorizer<ANNOTATION, PDF> categorizer, Vector vec)
{
final List<ScoredAnnotation<ANNOTATION>> results = new ArrayList<ScoredAnnotation<ANNOTATION>>();
// Running log-sum-exp of all posteriors; starts at log(0) = -infinity.
double logDenominator = Double.NEGATIVE_INFINITY;
for (final ANNOTATION category : categorizer.getCategories()) {
final double logPosterior = categorizer.computeLogPosterior(vec, category);
logDenominator = LogMath.add(logDenominator, logPosterior);
// Confidence temporarily holds the raw log-posterior; normalised below.
results.add(new ScoredAnnotation<ANNOTATION>(category, (float) logPosterior));
}
// Normalise: exp(logPosterior - logDenominator) is the posterior probability.
for (final ScoredAnnotation<ANNOTATION> scored : results)
scored.confidence = (float) Math.exp(scored.confidence - logDenominator);
// Highest-confidence annotations first.
Collections.sort(results, Collections.reverseOrder());
return results;
}
示例3: annotate
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Annotates the given object with every annotation whose binary classifier
 * scores the object's feature vector positively. The score magnitude is used
 * as the annotation confidence; the configured negative class is never
 * emitted.
 *
 * @param object the object to annotate
 * @return the (unsorted) list of positively-scored annotations
 */
@Override
public List<ScoredAnnotation<ANNOTATION>> annotate(OBJECT object) {
    final List<ScoredAnnotation<ANNOTATION>> results = new ArrayList<ScoredAnnotation<ANNOTATION>>();
    // The feature depends only on the object, not on the annotation, so
    // extract and convert it once instead of once per annotation (the
    // original recomputed it inside the loop).
    // NOTE(review): assumes extractFeature/convert are side-effect free —
    // confirm against the extractor implementation.
    final FeatureVector feature = extractor.extractFeature(object);
    final Vector vector = convert(feature);
    for (final ANNOTATION annotation : annotations) {
        // skip the negative class
        if (annotation.equals(negativeClass))
            continue;
        final double result = classifiers.get(annotation).evaluateAsDouble(vector);
        if (result > 0) {
            results.add(new ScoredAnnotation<ANNOTATION>(annotation, (float) Math.abs(result)));
        }
    }
    return results;
}
示例4: prox
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
// Row-wise soft-thresholding of W: each row's L2 norm is shrunk by lambda,
// and rows whose norm is at most lambda are zeroed. NOTE(review): from the
// shape of the computation this looks like the proximal operator of the
// lambda-scaled L2,1 mixed norm — confirm against the enclosing regulariser.
@Override
public Matrix prox(Matrix W, double lambda) {
final int nrows = W.getNumRows();
// Result starts as an all-zero sparse matrix the same shape as W; rows not
// overwritten in the loop below therefore stay zero (fully shrunk).
Matrix ret = SparseMatrixFactoryMTJ.INSTANCE.createMatrix(W.getNumRows(), W.getNumColumns());
// Row-major sparse views make the per-row access below cheap.
final SparseRowMatrix Wrow = CFMatrixUtils.asSparseRow(W);
// Matrix Wrow = W;
ret = CFMatrixUtils.asSparseRow(ret);
for (int r = 0; r < nrows; r++) {
// Vector row = W.getRow(r);
final SparseVector row = Wrow.getRow(r);
final double rownorm = row.norm2();
if (rownorm > lambda) {
// Scale so the row's norm decreases by exactly lambda.
final double scal = (rownorm - lambda) / rownorm;
final Vector scaled = row.scale(scal);
ret.setRow(r, scaled);
}
}
// Convert back to a column-major sparse matrix for the caller.
return CFMatrixUtils.asSparseColumn(ret);
}
示例5: diag
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Extract the diagonal elements as a vector.
 *
 * The diagonal of an m x n matrix has min(m, n) entries.
 *
 * @param mat
 *            the matrix to extract from
 * @return the diagonal, of dimensionality min(numRows, numColumns)
 */
public static Vector diag(Matrix mat) {
    // Fix: the original took getRow(0) (length numColumns) when the matrix
    // was wider than tall, then read getElement(rc, rc) for rc up to
    // numColumns - 1 — indexing past the last row. The branches were
    // inverted: a wide matrix's diagonal has numRows entries (the column
    // length), and a tall/square matrix's diagonal has numColumns entries
    // (the row length).
    Vector ret;
    if (mat.getNumColumns() > mat.getNumRows()) {
        ret = mat.getColumn(0);
    }
    else {
        ret = mat.getRow(0);
    }
    // ret is only used as a correctly-sized template; every entry is
    // overwritten with the corresponding diagonal element.
    final int rowcol = ret.getDimensionality();
    for (int rc = 0; rc < rowcol; rc++) {
        ret.setElement(rc, mat.getElement(rc, rc));
    }
    return ret;
}
示例6: testEstimateEigenvector
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Test of estimateEigenVector method, of class gov.sandia.cognition.math.matrix.EigenvectorPowerIteration.
 */
public void testEstimateEigenvector()
{
    System.out.println( "estimateEigenVector" );

    // Build a random symmetric positive semi-definite matrix A = C * C^T.
    final int dimension = 3;
    final double range = 1;
    final Matrix factor = MatrixFactory.getDefault().createUniformRandom(
        dimension, dimension, -range, range, random );
    final Matrix symmetric = factor.times( factor.transpose() );

    // Run the power iteration from the first canonical basis vector.
    final Vector initial = VectorFactory.getDefault().copyValues( 1.0, 0.0, 0.0 );
    final double stoppingThreshold = 1e-5;
    final int maxIterations = 100;
    final Vector eigenvector = EigenvectorPowerIteration.estimateEigenvector(
        initial, symmetric, stoppingThreshold, maxIterations );
    System.out.println( "EigenVector: " + eigenvector );
}
示例7: MarkovChain
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Creates a new instance of MarkovChain.
 * @param initialProbability
 * Initial probability Vector over the states; each entry must be
 * nonnegative and the entries must sum to 1.
 * @param transitionProbability
 * Transition probability matrix, where entry (i,j) is the probability
 * of moving from state "j" to state "i"; consequently every entry must
 * be nonnegative and each column must sum to 1.
 */
public MarkovChain(
    Vector initialProbability,
    Matrix transitionProbability )
{
    // A transition matrix must map the state space onto itself.
    if( !transitionProbability.isSquare() )
    {
        throw new IllegalArgumentException(
            "transitionProbability must be square!" );
    }

    // The initial distribution needs exactly one entry per state.
    final int numStates = transitionProbability.getNumRows();
    initialProbability.assertDimensionalityEquals( numStates );

    this.setTransitionProbability(transitionProbability);
    this.setInitialProbability(initialProbability);
}
示例8: testGetInputConverter
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Test of getInputConverter method, of class gov.sandia.cognition.framework.learning.EvaluatorBasedCognitiveModuleSettings.
 */
public void testGetInputConverter()
{
    // A two-label vector converter to install as the input converter.
    final DefaultSemanticLabel labelOne = new DefaultSemanticLabel("in1");
    final DefaultSemanticLabel labelTwo = new DefaultSemanticLabel("in2");
    final CogxelVectorConverter converter = new CogxelVectorConverter(
        new SemanticLabel[] { labelOne, labelTwo });

    final EvaluatorBasedCognitiveModuleSettings<Vector, Vector> settings =
        new EvaluatorBasedCognitiveModuleSettings<Vector, Vector>();

    // The converter starts unset, then reflects exactly what was assigned.
    assertNull(settings.getInputConverter());
    settings.setInputConverter(converter);
    assertSame(settings.getInputConverter(), converter);
}
示例9: testSetFoldCreator
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Test of setFoldCreator method, of class SupervisedLearnerExperiment.
 */
public void testSetFoldCreator()
{
    final LearnerValidationExperiment
        <InputOutputPair<Vector,Boolean>, InputOutputPair<Vector, Boolean>, Evaluator<Vector, Boolean>, Double, ConfidenceInterval>
        experiment = new LearnerValidationExperiment
        <InputOutputPair<Vector,Boolean>, InputOutputPair<Vector, Boolean>, Evaluator<Vector, Boolean>, Double, ConfidenceInterval>();

    // No fold creator is configured initially.
    assertNull(experiment.getFoldCreator());

    // Setting a creator makes the getter return that same instance...
    final LeaveOneOutFoldCreator<InputOutputPair<Vector, Boolean>> creator =
        new LeaveOneOutFoldCreator<InputOutputPair<Vector, Boolean>>();
    experiment.setFoldCreator(creator);
    assertSame(creator, experiment.getFoldCreator());

    // ...and clearing it returns the experiment to the unset state.
    experiment.setFoldCreator(null);
    assertNull(experiment.getFoldCreator());
}
示例10: testPMFSample
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * PMF.sample
 */
public void testPMFSample()
{
    System.out.println( "PMF.sample" );

    final MultinomialDistribution.PMF pmf =
        new MultinomialDistribution.PMF( this.createInstance() );

    // Draw samples and check, via a chi-square goodness-of-fit test, that
    // they are consistent with the PMF (i.e. drawn from its domain).
    final Collection<Vector> samples = pmf.sample( RANDOM,NUM_SAMPLES );
    final ChiSquareConfidence.Statistic statistic =
        ChiSquareConfidence.evaluateNullHypothesis(samples, pmf);
    System.out.println( "Chi Square: " + statistic );
    assertEquals( 1.0, statistic.getNullHypothesisProbability(), CONFIDENCE );
}
示例11: testEvaluate
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Test of evaluate method, of class gov.sandia.cognition.learning.kernel.SigmoidKernel.
 */
public void testEvaluate()
{
    final double kappa = RANDOM.nextDouble();
    final double constant = RANDOM.nextDouble();
    final SigmoidKernel instance = new SigmoidKernel(kappa, constant);

    final Vector zero = new Vector3();
    final Vector x = new Vector3(RANDOM.nextGaussian(), RANDOM.nextGaussian(), RANDOM.nextGaussian());
    final Vector y = new Vector3(RANDOM.nextGaussian(), RANDOM.nextGaussian(), RANDOM.nextGaussian());

    // kernel(a, b) must equal tanh(kappa * <a, b> + constant) for every
    // pair, covering both argument orders and the zero vector.
    final Vector[][] pairs = { { x, y }, { y, x }, { x, zero }, { y, zero }, { zero, zero } };
    for (final Vector[] pair : pairs)
    {
        final Vector a = pair[0];
        final Vector b = pair[1];
        assertEquals(Math.tanh(kappa * a.dotProduct(b) + constant),
            instance.evaluate(a, b));
    }
}
示例12: stateBeliefs
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Computes the probability distribution over all states for each
 * observation.
 * @param observations
 * @return
 * The list of state belief probabilities for each observation.
 */
public ArrayList<Vector> stateBeliefs(
    Collection<? extends ObservationType> observations )
{
    // Normalized forward pass: each weighted alpha holds the state
    // distribution after incorporating its observation.
    final ArrayList<Vector> likelihoods =
        this.computeObservationLikelihoods(observations);
    final ArrayList<WeightedValue<Vector>> forward =
        this.computeForwardProbabilities(likelihoods, true);

    // Unwrap the weighted values into plain belief vectors, in order.
    final ArrayList<Vector> beliefs = new ArrayList<Vector>( forward.size() );
    for( final WeightedValue<Vector> alpha : forward )
    {
        beliefs.add( alpha.getValue() );
    }
    return beliefs;
}
示例13: testConstructors
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
public void testConstructors()
{
    // Default constructor still provides non-null settings.
    EvaluatorBasedCognitiveModuleFactory<Vector, Vector> factory =
        new EvaluatorBasedCognitiveModuleFactory<Vector, Vector>();
    assertNotNull(factory.getSettings());

    // Settings+name constructor keeps the exact settings instance passed in.
    final EvaluatorBasedCognitiveModuleSettings<Vector, Vector> moduleSettings =
        this.createSettings();
    factory = new EvaluatorBasedCognitiveModuleFactory<Vector, Vector>(
        moduleSettings, "Module Name");
    assertSame(factory.getSettings(), moduleSettings);

    // Copy constructor deep-copies: settings present but a distinct object.
    factory = new EvaluatorBasedCognitiveModuleFactory<Vector, Vector>(
        factory);
    assertNotNull(factory.getSettings());
    assertNotSame(factory.getSettings(), moduleSettings);
}
示例14: evaluate
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Evaluates the entropy of the given vector, whose elements must form a
 * probability distribution (nonnegative, summing to 1.0 within TOLERANCE).
 *
 * @param input vector of probabilities
 * @return the entropy of the input's elements
 * @throws IllegalArgumentException if the elements do not sum to 1.0
 */
public Double evaluate(
    Vector input)
{
    // Copy the elements into a list, accumulating their sum as we go.
    final int dimensionality = input.getDimensionality();
    final ArrayList<Double> elements = new ArrayList<Double>( dimensionality );
    double total = 0.0;
    for( int index = 0; index < dimensionality; index++ )
    {
        final double value = input.getElement(index);
        total += value;
        elements.add( value );
    }

    // Entropy is only meaningful for a proper probability distribution.
    if( Math.abs(total-1.0) > TOLERANCE )
    {
        throw new IllegalArgumentException( "input elements must sum to 1.0" );
    }
    return UnivariateStatisticsUtil.computeEntropy(elements);
}
示例15: createInitialGuesses
import gov.sandia.cognition.math.matrix.Vector; //导入依赖的package包/类
/**
 * Creates a set of pre-defined initialGuess coordinates, with every
 * coordinate drawn uniformly from [-5, 5).
 * @param dim
 * Dimensionality of the guesses
 * @param num
 * Number of guesses to generate
 * @return
 * ArrayList of initialGuesses
 */
public ArrayList<Vector> createInitialGuesses(
    int dim,
    int num)
{
    // Half-width of the sampling interval for each coordinate.
    final double halfWidth = 5.0;
    final ArrayList<Vector> guesses = new ArrayList<Vector>(num);
    for (int guessIndex = 0; guessIndex < num; guessIndex++)
    {
        final Vector guess = VectorFactory.getDefault().createVector(dim);
        for (int element = 0; element < guess.getDimensionality(); element++)
        {
            // Uniform in [-halfWidth, halfWidth).
            guess.setElement(element, this.random.nextDouble() * 2 * halfWidth - halfWidth);
        }
        guesses.add(guess);
    }
    return guesses;
}