Java Vector.subVector Method Code Examples

This article collects typical usage examples of the Java method gov.sandia.cognition.math.matrix.Vector.subVector. If you are wondering how Vector.subVector is used in practice, or are looking for concrete examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, gov.sandia.cognition.math.matrix.Vector.


The following presents 11 code examples of the Vector.subVector method, sorted by popularity by default.
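
Before diving into the examples, here is a minimal, self-contained sketch of how subVector is typically called. It assumes, as the examples below suggest, that both the minimum and maximum indices passed to subVector are inclusive; the class and variable names are purely illustrative and are not part of the Foundry library.

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.VectorFactory;

public class SubVectorSketch
{
    public static void main( String[] args )
    {
        // Build a small dense vector to slice; copyValues is used the same
        // way in Example 10 below.
        Vector v = VectorFactory.getDefault().copyValues( 1.0, 2.0, 3.0, 4.0, 5.0 );

        // Both index arguments appear to be inclusive, so these two calls
        // split v into its first three and last two elements.
        Vector head = v.subVector( 0, 2 );  // expected: 1.0, 2.0, 3.0
        Vector tail = v.subVector( 3, 4 );  // expected: 4.0, 5.0

        System.out.println( head );
        System.out.println( tail );
    }
}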

Example 1: convertFromVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
public void convertFromVector(
    Vector parameters )
{

    int minIndex = 0;
    int maxIndex = -1;

    for (int i = 0; i < this.getLayers().size(); i++)
    {
        GeneralizedLinearModel layer = this.getLayers().get( i );
        Matrix matrix = layer.getDiscriminant().getDiscriminant();
        int num = matrix.getNumRows() * matrix.getNumColumns();

        minIndex = maxIndex + 1;
        maxIndex = minIndex + num - 1;

        Vector layerParameters = parameters.subVector( minIndex, maxIndex );
        layer.convertFromVector( layerParameters );
    }

}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 22, Source: FeedforwardNeuralNetwork.java

Example 2: convertFromVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
public void convertFromVector(
    Vector parameters)
{
    int numInputs = this.getInputDimensionality();
    int numHidden = this.getHiddenDimensionality();
    int numOutput = this.getOutputDimensionality();

    int num1 = numInputs * numHidden;
    int num2 = numHidden;
    int num3 = numHidden * numOutput;
    int num4 = numOutput;
    int num = num1 + num2 + num3 + num4;
    parameters.assertDimensionalityEquals(num);

    Vector p1 = parameters.subVector(0,num1-1);
    Vector p2 = parameters.subVector(num1,num1+num2-1);
    Vector p3 = parameters.subVector(num1+num2,num1+num2+num3-1);
    Vector p4 = parameters.subVector(num1+num2+num3,num-1);

    this.inputToHiddenWeights.convertFromVector(p1);
    this.inputToHiddenBiasWeights = p2;
    this.hiddenToOutputWeights.convertFromVector(p3);
    this.hiddenToOutputBiasWeights = p4;
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 25, Source: ThreeLayerFeedforwardNeuralNetwork.java

Example 3: testConvertToVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
/**
 * Test of convertToVector method, of class MultivariateDiscriminantWithBias.
 */
@Override
public void testConvertToVector()
{
    System.out.println("convertToVector");
    MultivariateDiscriminantWithBias instance = this.createRandom();
    int M = instance.getOutputDimensionality();
    int N = instance.getInputDimensionality();
    Vector p = instance.convertToVector();
    assertEquals( M*(N+1), p.getDimensionality() );

    Vector pd = p.subVector(0, M*N-1);
    Vector pb = p.subVector(M*N, M*(N+1)-1 );
    assertEquals( instance.getDiscriminant().convertToVector(), pd );
    assertEquals( instance.getBias(), pb );
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 19, Source: MultivariateDiscriminantWithBiasTest.java

Example 4: convertFromVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
public void convertFromVector(
    Vector parameters)
{

    int Adim = this.A.getNumRows() * this.A.getNumColumns();
    int Bdim = this.B.getNumRows() * this.B.getNumColumns();
    if( Adim+Bdim != parameters.getDimensionality() )
    {
        throw new IllegalArgumentException(
            "Number of parameters doesn't equal A and B elements!" );
    }

    Vector av = parameters.subVector(0, Adim-1);
    Vector bv = parameters.subVector(Adim,parameters.getDimensionality()-1);

    this.A.convertFromVector(av);
    this.B.convertFromVector(bv);
    
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 20, Source: LinearDynamicalSystem.java

Example 5: convertFromVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
@Override
public void convertFromVector(
    Vector parameters)
{
    final int num =
        this.getInputDimensionality() * this.getOutputDimensionality();
    parameters.assertDimensionalityEquals(num + this.getOutputDimensionality());
    Vector mp = parameters.subVector(0,num-1);
    Vector bp = parameters.subVector(num, num+this.getOutputDimensionality()-1);
    super.convertFromVector( mp );
    this.bias.convertFromVector(bp);
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 13, Source: MultivariateDiscriminantWithBias.java

Example 6: convertFromVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
public void convertFromVector(
    Vector parameters)
{
    final int d = this.getInputDimensionality();
    parameters.assertDimensionalityEquals( 1+d + 1+d*d );
    this.setCovarianceDivisor( parameters.getElement(0) );
    Vector mean = parameters.subVector(1, d);
    this.gaussian.setMean(mean);
    Vector iwp = parameters.subVector(d+1, parameters.getDimensionality()-1);
    this.inverseWishart.convertFromVector(iwp);
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 12, Source: NormalInverseWishartDistribution.java

Example 7: convertFromVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
@Override
public void convertFromVector(
    final Vector parameters)
{
    int p = this.getInputDimensionality();
    parameters.assertDimensionalityEquals( 1 + p*p );
    int dof = (int) Math.round(parameters.getElement(0));
    Vector matrix =
        parameters.subVector(1, parameters.getDimensionality()-1 );

    this.setDegreesOfFreedom(dof);
    this.getInverseScale().convertFromVector( matrix );
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 14, Source: InverseWishartDistribution.java

Example 8: testKnownConvertToVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
@Override
public void testKnownConvertToVector()
{
    System.out.println( "Known convertToVector" );
    MultivariateGaussian g = this.createInstance();
    Vector p = g.convertToVector();

    int d = g.getInputDimensionality();
    Vector mhat = p.subVector(0, d-1);
    assertEquals( g.getMean(), mhat );
    Vector Chat = p.subVector(d, p.getDimensionality()-1);
    Matrix C = MatrixFactory.getDefault().createMatrix(d, d);
    C.convertFromVector(Chat);
    assertEquals( g.getCovariance(), C );
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 16, Source: MultivariateGaussianTest.java

Example 9: testKnownConvertToVector

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
@Override
public void testKnownConvertToVector()
{
    System.out.println( "Known convertToVector" );

    InverseWishartDistribution instance = this.createInstance();
    Vector p = instance.convertToVector();
    int d = instance.getInputDimensionality();
    assertEquals( 1+d*d, p.getDimensionality() );
    assertEquals( instance.getDegreesOfFreedom(), (int) p.getElement(0) );
    Vector ip = p.subVector(1, p.getDimensionality()-1);
    Matrix IP = MatrixFactory.getDefault().createMatrix(d, d);
    IP.convertFromVector(ip);
    assertEquals( instance.getInverseScale(), IP );
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 16, Source: InverseWishartDistributionTest.java

Example 10: learn

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
/**
 * Computes the linear regression for the given Collection of
 * InputOutputPairs.  The inputs of the pairs are the independent variable,
 * and the pair output is the dependent variable (variable to predict).
 * The pairs can have an associated weight to bias the regression equation.
 * @param data 
 * Collection of InputOutputPairs for the variables.  Can be 
 * WeightedInputOutputPairs.
 * @return 
 * LinearCombinationFunction that minimizes the RMS error of the outputs.
 */
@Override
public LinearDiscriminantWithBias learn(
    Collection<? extends InputOutputPair<? extends Vectorizable, Double>> data )
{

    // We need to cheat to figure out how many coefficients we need...
    // So we'll push the first sample through... wasteful, but general
    int numCoefficients = CollectionUtil.getFirst(data).getInput().convertToVector().getDimensionality();
    int numSamples = data.size();

    Matrix X = MatrixFactory.getDefault().createMatrix( numCoefficients+1, numSamples );
    Matrix Xt = MatrixFactory.getDefault().createMatrix( numSamples, numCoefficients+1 );
    Vector y = VectorFactory.getDefault().createVector( numSamples );

    Vector one = VectorFactory.getDefault().copyValues(1.0);
    int n = 0;
    for (InputOutputPair<? extends Vectorizable, Double> pair : data)
    {
        double output = pair.getOutput();
        Vector input = pair.getInput().convertToVector().stack(one);

        // We don't want Xt to have the weight factor too
        final double weight = DatasetUtil.getWeight(pair);
        if( weight != 1.0 )
        {
            // We can use scaleEquals() here because of the stack() method
            input.scaleEquals(weight);
            output *= weight;
        }
        Xt.setRow( n, input );
        X.setColumn( n, input );
        y.setElement( n, output );
        n++;
    }

    // Solve for the coefficients
    Vector coefficients;
    if( this.getUsePseudoInverse() )
    {
        Matrix pseudoInverse = X.pseudoInverse(DEFAULT_PSEUDO_INVERSE_TOLERANCE);
        coefficients = y.times( pseudoInverse );
    }
    else
    {
        Matrix lhs = X.times( Xt );
        if( this.regularization > 0.0 )
        {
            // Ridge regularization: add the term to the diagonal of lhs,
            // which is (numCoefficients+1) x (numCoefficients+1).
            for( int i = 0; i < numCoefficients + 1; i++ )
            {
                double v = lhs.getElement(i, i);
                lhs.setElement(i, i, v + this.regularization);
            }
        }
        Vector rhs = y.times( Xt );
        coefficients = lhs.solve( rhs );
    }

    Vector w = coefficients.subVector(0, numCoefficients-1);
    double bias = coefficients.getElement(numCoefficients);
    return new LinearDiscriminantWithBias( w, bias );
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 73, Source: LinearRegression.java

Example 11: testComputeParameterGradient

import gov.sandia.cognition.math.matrix.Vector; // import the package/class that the method depends on
/**
 * Test of computeParameterGradient method, of class FactorizationMachine.
 */
@Test
public void testComputeParameterGradient()
{
    VectorFactory<?> vf = VectorFactory.getSparseDefault();
    FactorizationMachine instance = new FactorizationMachine();
    Vector input = vf.createVector(0);
    Vector result = instance.computeParameterGradient(input);
    assertEquals(1, result.getDimensionality());
    assertEquals(1.0, result.getElement(0), 0.0);
    
    int d = 3;
    instance.setWeights(VectorFactory.getDenseDefault().createVector(d));
    input = vf.createUniformRandom(d, -10, 10, random);
    result = instance.computeParameterGradient(input);
    assertEquals(1 + d, result.getDimensionality());
    assertEquals(1.0, result.getElement(0), 0.0);
    assertEquals(input, result.subVector(1, d));
    
    int k = 2;
    instance.setFactors(MatrixFactory.getDenseDefault().createUniformRandom(k, d, -10, 10, random));
    input = vf.createUniformRandom(d, -10, 10, random);
    result = instance.computeParameterGradient(input);
    assertEquals(10, result.getDimensionality());
    assertEquals(1.0, result.getElement(0), 0.0);
    assertEquals(input, result.subVector(1, d));
    
    Vector factorGradients = result.subVector(d + 1, d + d * k);
    for (int f = 0; f < k; f++)
    {
        for (int l = 0; l < d; l++)
        {
            double actual = factorGradients.getElement(f * d + l);
            
            double expected = 0.0;
            for (int j = 0; j < d; j++)
            {
                if (j != l)
                {
                    double xl = input.getElement(l);
                    expected += xl * instance.getFactors().getElement(f, j) * input.getElement(j);
                }
            }
            assertEquals(expected, actual, epsilon);
        }
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines of code: 50, Source: FactorizationMachineTest.java


Note: The gov.sandia.cognition.math.matrix.Vector.subVector examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their original authors; the source code copyright belongs to those authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.