本文整理汇总了Java中gov.sandia.cognition.math.matrix.Vector.equals方法的典型用法代码示例。如果您正苦于以下问题:Java Vector.equals方法的具体用法?Java Vector.equals怎么用?Java Vector.equals使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类gov.sandia.cognition.math.matrix.Vector
的用法示例。
在下文中一共展示了Vector.equals方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testEvaluate
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Verifies that LinearCombinationVectorFunction.evaluate() returns the
 * coefficient-weighted sum of its basis-function outputs, to within 1e-5.
 */
public void testEvaluate()
{
    System.out.println("evaluate");
    final int trials = 100;
    for (int trial = 0; trial < trials; trial++)
    {
        final int dim = RANDOM.nextInt(10) + 1;
        final Vector input =
            VectorFactory.getDefault().createUniformRandom(dim, -1, 1, RANDOM);
        final LinearCombinationVectorFunction combination = this.createInstance();
        // Build the expected output by hand: sum over n of c_n * f_n(input).
        Vector expected = VectorFactory.getDefault().createVector(dim);
        final int numBasis = combination.getBasisFunctions().size();
        for (int n = 0; n < numBasis; n++)
        {
            Vector term = combination.getBasisFunctions().get(n).evaluate(input);
            expected.plusEquals(term.scale(
                combination.getCoefficients().getElement(n)));
        }
        final Vector actual = combination.evaluate(input);
        // Tolerance compare; fall through to assertEquals for a readable failure.
        if (!expected.equals(actual, 1e-5))
        {
            assertEquals(expected, actual);
        }
    }
}
示例2: testLearnFullCovariance
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of learnFullCovariance method, of class MultivariateDecorrelator:
 * the learned mean and full covariance must match direct computation from
 * the dataset.
 */
public void testLearnFullCovariance()
{
    System.out.println("learnFullCovariance");
    final MultivariateDecorrelator.FullCovarianceLearner learner =
        new MultivariateDecorrelator.FullCovarianceLearner();
    learner.setDefaultCovariance(0.0);
    final ArrayList<Vector> samples = this.createDataset();
    final MultivariateDecorrelator decorrelator = learner.learn(samples);
    // The learned mean must agree exactly with the sample mean.
    final Vector sampleMean = MultivariateStatisticsUtil.computeMean(samples);
    if (!sampleMean.equals(decorrelator.getMean()))
    {
        assertEquals(sampleMean, decorrelator.getMean());
    }
    // The learned covariance must agree within tolerance.
    final Matrix sampleCovariance =
        MultivariateStatisticsUtil.computeVariance(samples, sampleMean);
    if (!sampleCovariance.equals(decorrelator.getCovariance(), TOLERANCE))
    {
        assertEquals(sampleCovariance, decorrelator.getCovariance());
    }
}
示例3: testEvaluate
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Verifies that PrincipalComponentsAnalysisFunction.evaluate() equals
 * dimension-reducing the mean-centered input, to within 1e-5.
 */
public void testEvaluate()
{
    System.out.println("evaluate");
    final int trials = 100;
    for (int trial = 0; trial < trials; trial++)
    {
        final PrincipalComponentsAnalysisFunction pca = this.createInstance();
        final Vector input = VectorFactory.getDefault().createUniformRandom(
            pca.getInputDimensionality(), -1, 1, random);
        // Expected: reduce the mean-centered input directly.
        final Vector centered = input.minus(pca.getMean());
        final Vector expected = pca.getDimensionReducer().evaluate(centered);
        final Vector actual = pca.evaluate(input);
        if (!expected.equals(actual, 1e-5))
        {
            assertEquals(expected, actual);
        }
    }
}
示例4: testComputeVector
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of computeVector method, of class
 * gov.sandia.cognition.learning.linesearch.DirectionalVectorToScalarFunction:
 * computeVector(t) must return offset + t * direction.
 */
public void testComputeVector()
{
    System.out.println("computeVector");
    final double step = random.nextGaussian();
    final DirectionalVectorToScalarFunction function = this.createInstance();
    // Expected point along the search direction.
    final Vector scaledDirection = function.getDirection().scale( step );
    final Vector expected = function.getVectorOffset().plus( scaledDirection );
    final Vector actual = function.computeVector( step );
    if( !expected.equals( actual, TOLERANCE ) )
    {
        assertEquals( expected, actual );
    }
}
示例5: testSolve_AbstractMTJVector
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of solve method, of class DiagonalMatrixMTJ: solving A*x = b for a
 * random diagonal A and known x must recover x.
 */
public void testSolve_AbstractMTJVector()
{
    System.out.println( "solve" );
    final DiagonalMatrixMTJ diagonal = this.createRandom();
    final int dim = diagonal.getDimensionality();
    final double range = 10.0;
    // Known solution vector, drawn as a dense MTJ vector.
    final AbstractMTJVector truth = (AbstractMTJVector)
        DenseVectorFactoryMTJ.INSTANCE.createUniformRandom( dim, -range, range, RANDOM );
    final Vector rhs = diagonal.times( truth );
    final Vector solution = diagonal.solve( rhs );
    if( !solution.equals( truth, TOLERANCE ) )
    {
        assertEquals( solution, truth );
    }
}
示例6: testSolve_Vector
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of solve method, of class DiagonalMatrixMTJ: solving A*x = b for a
 * random diagonal A and a generic Vector x must recover x.
 */
public void testSolve_Vector()
{
    System.out.println( "solve" );
    final DiagonalMatrixMTJ diagonal = this.createRandom();
    final int dim = diagonal.getDimensionality();
    final double range = 10.0;
    // Known solution vector from the default factory.
    final Vector truth =
        VectorFactory.getDefault().createUniformRandom( dim, -range, range, RANDOM );
    final Vector rhs = diagonal.times( truth );
    final Vector solution = diagonal.solve( rhs );
    if( !solution.equals( truth, TOLERANCE ) )
    {
        assertEquals( solution, truth );
    }
}
示例7: testComputeParameterGradient
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of computeParameterGradient method, of class
 * ParallelizedCostFunctionContainer.
 *
 * The parallelized gradient (with a single-thread pool) must match the
 * gradient of the underlying SumSquaredErrorCostFunction computed directly
 * on the same data, to within 1e-5.
 */
public void testComputeParameterGradient()
{
    System.out.println( "computeParameterGradient" );
    int num = 100;
    int dim = 3;
    double r = 1.0;
    // Random linear discriminant whose parameter gradient we evaluate.
    MultivariateDiscriminant f = new MultivariateDiscriminant(
        MatrixFactory.getDefault().createUniformRandom( dim, dim, -r, r, RANDOM ) );
    // Random input/output pairs that define the cost surface.
    // (A dead "new LinearVectorFunction(RANDOM.nextDouble())" statement was
    // removed here; its only effect was to consume one RNG draw.)
    ArrayList<InputOutputPair<Vector,Vector>> data =
        new ArrayList<InputOutputPair<Vector, Vector>>( num );
    for( int n = 0; n < num; n++ )
    {
        data.add( new DefaultInputOutputPair<Vector, Vector>(
            VectorFactory.getDefault().createUniformRandom( dim, -r, r, RANDOM ),
            VectorFactory.getDefault().createUniformRandom( dim, -r, r, RANDOM ) ) );
    }
    ParallelizedCostFunctionContainer instance = new ParallelizedCostFunctionContainer();
    SumSquaredErrorCostFunction costFunction = new SumSquaredErrorCostFunction();
    instance.setCostFunction( costFunction );
    instance.setCostParameters( data );
    instance.setThreadPool( ParallelUtil.createThreadPool( 1 ));
    Vector result = instance.computeParameterGradient( f );
    // Serial reference: same cost function, same data, computed directly.
    instance.getCostFunction().setCostParameters( data );
    Vector expected = instance.getCostFunction().computeParameterGradient( f );
    if( !result.equals( expected, 1e-5 ) )
    {
        assertEquals( expected, result );
    }
}
示例8: testEvaluate
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of evaluate method, of class
 * gov.sandia.cognition.learning.util.function.LinearRegressionEvaluator.
 *
 * Feeds a sliding window of 3-D samples into a
 * LinearRegressionCoefficientExtractor and checks the per-coordinate
 * regression intercepts and slopes against hand-computed values.
 */
public void testEvaluate()
{
    System.out.println( "evaluate" );
    int maxBufferSize = 5;
    LinearRegressionCoefficientExtractor instance =
        new LinearRegressionCoefficientExtractor( maxBufferSize );
    // Prime the buffer with zeros, then push samples with known linear trends
    // in each coordinate.
    instance.evaluate( new Vector3( 0.0, 0.0, 0.0 ) );
    instance.evaluate( new Vector3( 0.0, 0.0, 0.0 ) );
    instance.evaluate( new Vector3( 0.0, 0.0, 0.0 ) );
    instance.evaluate( new Vector3( 0.0, 0.0, 0.0 ) );
    instance.evaluate( new Vector3( 0.0, 0.0, 0.0 ) );
    instance.evaluate( new Vector3( 0.0, 10.0, 0.0 ) );
    instance.evaluate( new Vector3( 0.0, 12.0, 2.0 ) );
    instance.evaluate( new Vector3( 0.0, 14.0, 1.0 ) );
    instance.evaluate( new Vector3( 0.0, 16.0, 2.0 ) );
    Vector retval = instance.evaluate( new Vector3( 0.0, 18.0, 0.0 ) );
    // Only the last maxBufferSize samples should remain buffered.
    assertEquals( maxBufferSize, instance.getState().size() );
    // Expected slopes (ms) and intercepts (bs) per coordinate, stacked as
    // [intercepts; slopes]. (Stale commented-out Matrix-based expected-value
    // code was removed here.)
    Vector3 expected_ms = new Vector3( 0.0, 2.0, 0.0 );
    Vector3 expected_bs = new Vector3( 0.0, 18.0, 1.0 );
    Vector expected = expected_bs.stack(expected_ms);
    System.out.println( "Norm2: " + expected.minus( retval ).norm2() );
    if (!expected.equals( retval, 1e-5 ))
    {
        assertEquals( expected, retval );
    }
}
示例9: testEvaluate
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of evaluate method, of class MultivariateDecorrelator:
 * evaluate(x) must equal (x - mean) times the covariance inverse square root.
 */
public void testEvaluate()
{
    System.out.println("evaluate");
    final MultivariateDecorrelator decorrelator = this.createInstance();
    final Vector input = this.createRandomInput();
    // Expected: whiten the mean-centered input by hand.
    final Vector centered = input.minus(decorrelator.getMean());
    final Vector expected =
        centered.times( decorrelator.getCovarianceInverseSquareRoot() );
    final Vector actual = decorrelator.evaluate(input);
    if( !expected.equals( actual, TOLERANCE ) )
    {
        assertEquals( expected, actual );
    }
}
示例10: testGetFutureStateDistribution
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of getFutureStateDistribution method, of class MarkovChain.
 * Checks the one-step prediction against a direct matrix-vector product,
 * that non-positive step counts return the initial distribution, and that
 * multi-step predictions track a hand-iterated (renormalized) recursion.
 */
public void testGetFutureStateDistribution()
{
System.out.println("getFutureStateDistribution");
MarkovChain instance = this.createInstance();
// One step ahead should be transitionProbability * initialProbability.
Vector expected = instance.transitionProbability.times(
instance.getInitialProbability() );
Vector result = instance.getFutureStateDistribution(
instance.getInitialProbability(), 1);
if( !expected.equals( result ) )
{
assertEquals( expected, result );
}
// Zero or negative step counts return the initial distribution unchanged.
expected = instance.getInitialProbability();
assertEquals( expected, instance.getFutureStateDistribution( instance.getInitialProbability(), -1 ) );
assertEquals( expected, instance.getFutureStateDistribution( instance.getInitialProbability(), 0 ) );
// Multi-step: iterate the transition matrix by hand, renormalizing to a
// unit 1-norm each step, and compare against the i-step prediction.
for( int i = 1; i < 100; i++ )
{
expected = instance.getTransitionProbability().times( expected );
// NOTE(review): this renormalizes by result.norm1() — the PREVIOUS
// iteration's prediction — rather than expected.norm1(); presumably the
// returned distribution is already normalized so the factor is ~1.0,
// but confirm this is intentional.
expected = expected.scale( 1.0/result.norm1() );
result = instance.getFutureStateDistribution(
instance.getInitialProbability(), i );
if( !expected.equals( result, TOLERANCE ) )
{
assertEquals( expected, result );
}
}
}
示例11: testSummarize
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of summarize method, of class WeightedRingAverager: the summary must
 * equal the weighted sum of the values divided by the total weight.
 */
public void testSummarize()
{
    System.out.println("summarize");
    final int sampleCount = 100;
    final int dim = 2;
    final ArrayList<DefaultWeightedValue<Vector>> weightedData =
        new ArrayList<DefaultWeightedValue<Vector>>( sampleCount );
    // Accumulates the weighted sum in parallel with the dataset build-up.
    final RingAccumulator<Vector> weightedSum =
        new RingAccumulator<Vector>();
    double totalWeight = 0.0;
    for( int n = 0; n < sampleCount; n++ )
    {
        final Vector value = VectorFactory.getDefault().createUniformRandom(
            dim, -1.0, 1.0, RANDOM );
        final double weight = RANDOM.nextDouble();
        totalWeight += weight;
        weightedData.add( new DefaultWeightedValue<Vector>( value, weight ) );
        weightedSum.accumulate( value.scale(weight) );
    }
    final WeightedRingAverager<Vector> averager = new WeightedRingAverager<Vector>();
    final Vector actual = averager.summarize(weightedData);
    final Vector expected = weightedSum.getSum().scale( 1.0/totalWeight );
    if( !expected.equals( actual, TOLERANCE ) )
    {
        assertEquals( expected, actual );
    }
}
示例12: testLearn
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Tests FisherLinearDiscriminantBinaryCategorizer.ClosedFormSolver.learn:
 * the learned weight vector must equal (C1 + C0)^-1 * (m1 - m0) computed
 * from the empirical per-class statistics.
 */
public void testLearn()
{
    System.out.println( "ClosedFormSolver.learn" );
    final FisherLinearDiscriminantBinaryCategorizer.ClosedFormSolver solver =
        new FisherLinearDiscriminantBinaryCategorizer.ClosedFormSolver( 0.0 );
    final int samplesPerClass = 1000;
    final double range = 1e-0;
    // Draw the negative class first, then the positive class (this ordering
    // preserves the RNG consumption sequence).
    final Vector meanNegative = this.createRandomInput();
    final int dim = meanNegative.getDimensionality();
    final Matrix covNegative = MatrixFactory.getDefault().createUniformRandom( dim, dim, -range, range, RANDOM );
    final ArrayList<Vector> negatives = MultivariateGaussian.sample( meanNegative, covNegative, RANDOM, samplesPerClass );
    final Vector meanPositive = this.createRandomInput();
    final Matrix covPositive = MatrixFactory.getDefault().createUniformRandom( dim, dim, -range, range, RANDOM );
    final ArrayList<Vector> positives = MultivariateGaussian.sample( meanPositive, covPositive, RANDOM, samplesPerClass );
    // Label positives true and negatives false; positives are added first.
    final ArrayList<InputOutputPair<Vector,Boolean>> labeled =
        new ArrayList<InputOutputPair<Vector,Boolean>>( negatives.size() + positives.size() );
    for( Vector v : positives )
    {
        labeled.add( new DefaultInputOutputPair<Vector,Boolean>( v, true ) );
    }
    for( Vector v : negatives )
    {
        labeled.add( new DefaultInputOutputPair<Vector,Boolean>( v, false ) );
    }
    final FisherLinearDiscriminantBinaryCategorizer categorizer = solver.learn( labeled );
    final LinearDiscriminant discriminant = (LinearDiscriminant) categorizer.getEvaluator();
    assertEquals( dim, discriminant.getWeightVector().getDimensionality() );
    // Closed-form Fisher weights from the empirical class statistics.
    final Pair<Vector,Matrix> statsNegative =
        MultivariateStatisticsUtil.computeMeanAndCovariance(negatives);
    final Pair<Vector,Matrix> statsPositive =
        MultivariateStatisticsUtil.computeMeanAndCovariance(positives);
    final Vector expectedWeights =
        statsPositive.getSecond().plus( statsNegative.getSecond() ).inverse().times(
            statsPositive.getFirst().minus( statsNegative.getFirst() ) );
    if( !expectedWeights.equals( discriminant.getWeightVector(), TOLERANCE ) )
    {
        assertEquals( expectedWeights, discriminant.getWeightVector() );
    }
}
示例13: testLearnDiagonalCovariance
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of learnDiagonalCovariance method, of class MultivariateDecorrelator:
 * the learned mean must match the sample mean, the diagonal of the learned
 * covariance must hold the per-coordinate (divide-by-N) variances, and every
 * off-diagonal entry must be exactly zero.
 */
public void testLearnDiagonalCovariance()
{
    System.out.println("learnDiagonalCovariance");
    final ArrayList<Vector> samples = this.createDataset();
    final MultivariateDecorrelator.DiagonalCovarianceLearner learner =
        new MultivariateDecorrelator.DiagonalCovarianceLearner();
    learner.setDefaultCovariance(0.0);
    final MultivariateDecorrelator decorrelator = learner.learn(samples);
    // The learned mean must match the sample mean exactly.
    final Vector sampleMean = MultivariateStatisticsUtil.computeMean(samples);
    if (!sampleMean.equals(decorrelator.getMean()))
    {
        assertEquals(sampleMean, decorrelator.getMean());
    }
    final Matrix learnedCovariance = decorrelator.getCovariance();
    final int dim = sampleMean.getDimensionality();
    assertEquals(dim, learnedCovariance.getNumRows());
    assertEquals(dim, learnedCovariance.getNumColumns());
    // Factor converting the (N-1)-normalized sample variance to the
    // N-normalized (biased) form the learner appears to produce.
    final double biasCorrection = (samples.size() - 1.0) / samples.size();
    for (int row = 0; row < dim; row++)
    {
        // Gather the row-th coordinate of every sample.
        final ArrayList<Double> coordinate = new ArrayList<Double>(samples.size());
        for (Vector sample : samples)
        {
            coordinate.add(sample.getElement(row));
        }
        for (int col = 0; col < dim; col++)
        {
            if (row == col)
            {
                final double variance = biasCorrection
                    * UnivariateStatisticsUtil.computeVariance(coordinate);
                assertEquals(variance, learnedCovariance.getElement(row, row), TOLERANCE);
            }
            else
            {
                assertEquals(0.0, learnedCovariance.getElement(row, col));
            }
        }
    }
}
示例14: testPCALearn
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of learn method, of class gov.sandia.cognition.learning.pca.PrincipalComponentsAnalysis.
 *
 * Compares the learned principal components against a direct singular value
 * decomposition of the mean-centered data matrix.
 *
 * The example data is based on: http://www.kernel-machines.org/code/kpca_toy.m
 */
public void testPCALearn()
{
System.out.println( "PCA.learn" );
// Random dataset size (10..109) and value range.
int num = random.nextInt( 100 ) + 10;
ArrayList<Vector> data = new ArrayList<Vector>( num );
final double r1 = random.nextDouble();
final double r2 = r1 / random.nextDouble();
for (int i = 0; i < num; i++)
{
data.add( VectorFactory.getDefault().createUniformRandom( INPUT_DIM, r1, r2, random ) );
}
// Build the mean-centered data matrix X, one sample per column.
Vector mean = MultivariateStatisticsUtil.computeMean( data );
DenseMatrix X = DenseMatrixFactoryMTJ.INSTANCE.createMatrix( INPUT_DIM, num );
for (int n = 0; n < num; n++)
{
X.setColumn( n, data.get( n ).minus( mean ) );
}
// Snapshot the data so we can later verify learn() does not mutate it.
final ArrayList<Vector> dataCopy = ObjectUtil.cloneSmartElementsAsArrayList(data);
// Reference decomposition: SVD of the centered data matrix (timed for the
// informational printout below).
long startsvd = System.currentTimeMillis();
SingularValueDecomposition svd = SingularValueDecompositionMTJ.create( X );
long stopsvd = System.currentTimeMillis();
long start = System.currentTimeMillis();
PrincipalComponentsAnalysis instance = this.createPCAInstance();
PrincipalComponentsAnalysisFunction f = instance.learn( data );
long stop = System.currentTimeMillis();
// learn() must not have modified its input data.
assertEquals(dataCopy, data);
System.out.println( "Uhat:\n" + f.getDimensionReducer().getDiscriminant().transpose() );
System.out.println( "U:\n" + svd.getU() );
System.out.println( "Time taken: SVD = " + (stopsvd - startsvd) + ", PCA = " + (stop - start) );
// Make sure the PCA algorithm subtracted off the sample mean
if (mean.equals( f.getMean(), 1e-5 ) == false)
{
assertEquals( mean, f.getMean() );
}
assertEquals( OUTPUT_DIM, instance.getNumComponents() );
assertEquals( instance.getNumComponents(), f.getOutputDimensionality() );
assertEquals( INPUT_DIM, f.getInputDimensionality() );
// NOTE(review): this mean check duplicates the one a few lines above —
// likely redundant; confirm before removing.
if (mean.equals( f.getMean(), 1e-5 ) == false)
{
assertEquals( mean, f.getMean() );
}
// The learned components should be mutually orthogonal unit vectors, each
// matching the corresponding SVD left singular vector up to sign.
double absnorm = 0.0;
int nc = instance.getNumComponents() * INPUT_DIM;
for (int i = 0; i < instance.getNumComponents(); i++)
{
Vector uihat = f.getDimensionReducer().getDiscriminant().getRow( i );
for (int j = 0; j < i; j++)
{
Vector ujhat = f.getDimensionReducer().getDiscriminant().getRow( j );
assertEquals( "Dot product between " + i + " and " + j + " is too large!", 0.0, uihat.dotProduct( ujhat ), 1e-2 );
}
assertEquals( 1.0, uihat.norm2(), 1e-5 );
// Component sign is arbitrary, so take the smaller of the two possible
// difference norms.
Vector ui = svd.getU().getColumn( i );
absnorm += Math.min( ui.minus( uihat ).norm2(), ui.minus( uihat.scale( -1 ) ).norm2() );
}
// Average per-entry discrepancy between learned components and SVD columns.
absnorm /= nc;
System.out.println( "U 1-norm: " + absnorm );
assertEquals( 0.0, absnorm, 1e-1 );
}
示例15: testKnownValues
import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Known-values test: for a linear dynamical system the extended Kalman
 * filter should produce approximately the same posterior (mean and
 * covariance) as the standard Kalman filter.
 */
@Override
public void testKnownValues()
{
    System.out.println( "Known Values" );
    final int dim = 2;
    // Identity dynamics, input gain, and observation matrices => simple LDS.
    final Matrix dynamics = MatrixFactory.getDefault().createIdentity(dim, dim);
    final Matrix inputGain = MatrixFactory.getDefault().createIdentity(dim, dim);
    final Matrix observation = MatrixFactory.getDefault().createIdentity(dim, dim);
    final LinearDynamicalSystem model =
        new LinearDynamicalSystem( dynamics, inputGain, observation );
    final MultivariateDiscriminant outputModel =
        new MultivariateDiscriminant( observation );
    final Vector input = VectorFactory.getDefault().createVector(dim,0.1);
    final Matrix modelCovariance = MatrixFactory.getDefault().createIdentity(dim,dim);
    final Matrix outputCovariance = MatrixFactory.getDefault().createIdentity(dim,dim);
    // Each filter gets its own clone of the model.
    final ExtendedKalmanFilter ekf = new ExtendedKalmanFilter(
        model.clone(), outputModel, input, modelCovariance, outputCovariance );
    final KalmanFilter kalman = new KalmanFilter(
        model.clone(), modelCovariance, outputCovariance );
    // Generate noisy observations of the model driven by the constant input.
    final MultivariateGaussian noiseMaker = new MultivariateGaussian(
        VectorFactory.getDefault().createVector(dim), outputCovariance );
    final ArrayList<Vector> noise = noiseMaker.sample(RANDOM, 100);
    final ArrayList<Vector> observations = new ArrayList<Vector>( noise.size() );
    for( Vector sample : noise )
    {
        observations.add( model.evaluate(input).plus( sample ) );
    }
    final MultivariateGaussian ekfPosterior = ekf.learn(observations);
    final MultivariateGaussian kalmanPosterior = kalman.learn(observations);
    System.out.println( "EKF:\n" + ekfPosterior );
    System.out.println( "Kalman:\n" + kalmanPosterior );
    final double EPS = 1e-1;
    // Posterior means must agree to within EPS.
    final Vector kalmanMean = kalmanPosterior.getMean();
    final Vector ekfMean = ekfPosterior.getMean();
    if( !kalmanMean.equals(ekfMean,EPS) )
    {
        assertEquals( kalmanMean, ekfMean );
    }
    // Posterior covariances must agree to within EPS.
    final Matrix kalmanCovariance = kalmanPosterior.getCovariance();
    final Matrix ekfCovariance = ekfPosterior.getCovariance();
    if( !kalmanCovariance.equals(ekfCovariance,EPS) )
    {
        assertEquals( kalmanCovariance, ekfCovariance );
    }
}