当前位置: 首页>>代码示例>>Java>>正文


Java Vector.scaleEquals方法代码示例

本文整理汇总了Java中gov.sandia.cognition.math.matrix.Vector.scaleEquals方法的典型用法代码示例。如果您正苦于以下问题:Java Vector.scaleEquals方法的具体用法?Java Vector.scaleEquals怎么用?Java Vector.scaleEquals使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在gov.sandia.cognition.math.matrix.Vector的用法示例。


在下文中一共展示了Vector.scaleEquals方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: sample

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
@Override
public Vector sample(
    final Random random)
{
    // Standard Dirichlet sampling: draw one standard Gamma variate per
    // concentration parameter, then normalize so the components sum to one.
    final int dimensionality = this.getParameters().getDimensionality();
    final Vector sample = VectorFactory.getDenseDefault().createVector(dimensionality);
    double total = 0.0;
    for (int index = 0; index < dimensionality; index++)
    {
        final double draw = GammaDistribution.sampleStandard(
            this.parameters.get(index), random);
        sample.set(index, draw);
        total += draw;
    }

    // Guard against a degenerate all-zero draw before normalizing.
    if (total != 0.0)
    {
        sample.scaleEquals(1.0 / total);
    }

    return sample;
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:24,代码来源:DirichletDistribution.java

示例2: computeParameterGradientAmalgamate

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
public Vector computeParameterGradientAmalgamate(
    Collection<Object> partialResults )
{
    // Combine per-partition partial results: sum the gradient numerators
    // and the SSE denominators separately.
    final RingAccumulator<Vector> gradientSum = new RingAccumulator<Vector>();
    double errorSum = 0.0;
    for( Object partial : partialResults )
    {
        final GradientPartialSSE sse = (GradientPartialSSE) partial;
        gradientSum.accumulate( sse.getFirst() );
        errorSum += sse.getSecond();
    }

    final Vector gradient = gradientSum.getSum();
    // Skip the scale (avoiding a division by zero) when no error mass
    // was accumulated.
    if( errorSum != 0.0 )
    {
        gradient.scaleEquals( 1.0 / (2.0 * errorSum) );
    }
    return gradient;
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:21,代码来源:SumSquaredErrorCostFunction.java

示例3: addClusterMember

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
@Override
public void addClusterMember(
    final CentroidCluster<Vector> cluster,
    final Vector member)
{
    final Vector centroid = cluster.getCentroid();
    if (centroid == null)
    {
        // First member of the cluster: the centroid is a copy of it.
        cluster.setCentroid(member.clone());
    }
    else
    {
        // Incremental mean update: centroid += (member - centroid) / newSize.
        final int newSize = cluster.getMembers().size() + 1;
        final Vector offset = member.minus(centroid);
        offset.scaleEquals(1.0 / newSize);
        centroid.plusEquals(offset);
    }
    cluster.getMembers().add(member);
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:22,代码来源:VectorMeanCentroidClusterCreator.java

示例4: removeClusterMember

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
@Override
public boolean removeClusterMember(
    final CentroidCluster<Vector> cluster,
    final Vector member)
{
    // Nothing to do when the member is not actually in the cluster.
    if (!cluster.getMembers().remove(member))
    {
        return false;
    }

    final int remaining = cluster.getMembers().size();
    final Vector centroid = cluster.getCentroid();
    if (remaining <= 0)
    {
        // Last member removed: reset the centroid to the origin.
        centroid.zero();
    }
    else
    {
        // Reverse the incremental-mean update applied when the member
        // was added: centroid -= (member - centroid) / remaining.
        final Vector offset = member.minus(centroid);
        offset.scaleEquals(1.0 / remaining);
        centroid.minusEquals(offset);
    }
    return true;
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:27,代码来源:VectorMeanCentroidClusterCreator.java

示例5: computeLocalWeights

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
@Override
public Vector computeLocalWeights(
    final Vector counts)
{
    // Compute the base local weights, then normalize them by the log of
    // one plus the average term count: result /= log(1 + mean(counts)).
    final Vector result = super.computeLocalWeights(counts);

    final int dimensionality = result.getDimensionality();
    if (dimensionality != 0)
    {
        final double average = counts.norm1() / dimensionality;
        final double divisor = Math.log(1.0 + average);

        // BUG FIX: when every count is zero, average == 0 and the divisor is
        // log(1) == 0, so the original scaled by 1/0 and filled the result
        // with NaN/Infinity. Leave the result untouched in that case.
        if (divisor != 0.0)
        {
            result.scaleEquals(1.0 / divisor);
        }
    }

    return result;
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:18,代码来源:NormalizedLogLocalTermWeighter.java

示例6: computeForwardProbabilities

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Computes the recursive solution to the forward probabilities of the
 * HMM.
 * @param alpha
 * Previous alpha value.
 * @param b
 * Current observation-emission likelihood.
 * @param normalize
 * True to normalize the alphas, false to leave them unnormalized.
 * @return
 * Alpha with the associated weighting (will be 1 if unnormalized).
 */
protected WeightedValue<Vector> computeForwardProbabilities(
    Vector alpha,
    Vector b,
    boolean normalize )
{
    // alpha' = (A * alpha) .* b : transition step, then emission weighting.
    final Vector alphaNext = this.getTransitionProbability().times( alpha );
    alphaNext.dotTimesEquals(b);

    double weight = 1.0;
    if( normalize )
    {
        // Rescale so the alphas sum to one, remembering the scale factor
        // as the weight.
        weight = 1.0 / alphaNext.norm1();
        alphaNext.scaleEquals(weight);
    }

    return new DefaultWeightedValue<Vector>( alphaNext, weight );
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:35,代码来源:HiddenMarkovModel.java

示例7: testConvertFromVector

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of convertFromVector method, of class gov.sandia.cognition.learning.algorithm.gradient.GradientDescendableApproximator.
 */
public void testConvertFromVector()
{
    System.out.println("convertFromVector");

    final GradientDescendableApproximator instance = this.createInstance();
    final GradientDescendableApproximator clone = instance.clone();

    // Pushing a scaled parameter vector in must read back identically.
    final Vector scaled = instance.convertToVector();
    scaled.scaleEquals(Math.random());
    instance.convertFromVector(scaled);
    assertEquals(scaled, instance.convertToVector());

    // The clone still holds the original parameters, so they differ from
    // the scaled ones until they are pushed back into the instance.
    final Vector original = clone.convertToVector();
    assertFalse(original.equals(instance.convertToVector()));
    instance.convertFromVector(original);
    assertEquals(original, instance.convertToVector());
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:23,代码来源:GradientDescendableApproximatorTest.java

示例8: testGetSteadyStateDistribution

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Test of getSteadyStateDistribution method, of class MarkovChain.
 */
public void testGetSteadyStateDistribution()
{
    System.out.println("getSteadyStateDistribution");
    final MarkovChain instance = this.createInstance();

    final Vector phat = instance.getSteadyStateDistribution();

    final EigenDecompositionRightMTJ evd = EigenDecompositionRightMTJ.create(
        (DenseMatrix) instance.getTransitionProbability() );

    final Vector p = evd.getEigenVectorsRealPart().getColumn(0);

    // Sum the raw components (instead of norm1) in case the EVD returned
    // the negated eigenvector; dividing by the signed sum corrects the sign.
    double sum = 0.0;
    final int dimensionality = p.getDimensionality();
    for( int i = 0; i < dimensionality; i++ )
    {
        sum += p.getElement(i);
    }
    p.scaleEquals( 1.0/sum );

    System.out.println( "P: " + p );
    System.out.println( "Phat: " + phat );
    assertTrue( p.equals( phat, TOLERANCE ) );
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:28,代码来源:MarkovChainTest.java

示例9: computeParameterGradient

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
public Vector computeParameterGradient(
    GradientDescendable function )
{
    // Accumulate weighted gradient contributions over all training pairs.
    final RingAccumulator<Vector> gradientSum = new RingAccumulator<Vector>();
    double weightSum = 0.0;

    for (InputOutputPair<? extends Vector, ? extends Vector> pair : this.getCostParameters())
    {
        final Vector input = pair.getInput();

        // negativeError = f(input) - target
        final Vector negativeError = function.evaluate( input );
        negativeError.minusEquals( pair.getOutput() );

        final double weight = DatasetUtil.getWeight(pair);
        if (weight != 1.0)
        {
            negativeError.scaleEquals( weight );
        }
        weightSum += weight;

        // Chain rule: this pair's contribution is error^T * Jacobian.
        final Matrix gradient = function.computeParameterGradient( input );
        gradientSum.accumulate( negativeError.times( gradient ) );
    }

    final Vector negativeSum = gradientSum.getSum();
    // Normalize by the total weight, guarding against a zero-weight set.
    if (weightSum != 0.0)
    {
        negativeSum.scaleEquals( 1.0 / weightSum );
    }

    return negativeSum;
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:41,代码来源:MeanSquaredErrorCostFunction.java

示例10: testCDFConvertToVector

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Tests convertToVector
 */
public void testCDFConvertToVector()
{
    System.out.println( "CDF.convertToVector" );

    final ClosedFormUnivariateDistribution<NumberType> instance = this.createInstance();
    final ClosedFormCumulativeDistributionFunction<NumberType> cdf = instance.getCDF();

    // The CDF and the underlying distribution share one parameterization.
    assertEquals( instance.convertToVector(), cdf.convertToVector() );

    // There must be at least one parameter.
    final Vector x1 = cdf.convertToVector();
    assertNotNull( x1 );
    assertTrue( x1.getDimensionality() > 0 );

    // Repeated calls return equal but distinct vectors (defensive copies).
    final Vector x2 = cdf.convertToVector();
    assertNotNull( x2 );
    assertNotSame( x1, x2 );
    assertEquals( x1.getDimensionality(), x2.getDimensionality() );
    assertEquals( x1, x2 );

    // Mutating a returned copy must not affect the CDF's own parameters.
    x2.setElement( 0, x2.getElement( 0 ) + RANDOM.nextDouble() );
    x2.scaleEquals( RANDOM.nextDouble() );
    final Vector x3 = cdf.convertToVector();
    assertNotNull( x3 );
    assertFalse( x2.equals( x3 ) );
    assertEquals( x1, x3 );
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:35,代码来源:ClosedFormUnivariateDistributionTestHarness.java

示例11: sampleInto

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
@Override
public void sampleInto(
    final Random random,
    final int numSamples,
    final Collection<? super Vector> output)
{
    // Pre-draw numSamples standard Gamma variates per dimension, using the
    // corresponding concentration parameter as the Gamma shape.
    final GammaDistribution.CDF gammaRV = new GammaDistribution.CDF(1.0, 1.0);

    final int dimensionality = this.getParameters().getDimensionality();
    final double[][] gammaData = new double[dimensionality][];
    for (int i = 0; i < dimensionality; i++)
    {
        gammaRV.setShape(this.parameters.get(i));
        gammaData[i] = gammaRV.sampleAsDoubles(random, numSamples);
    }

    // Each sample is the normalized vector of its per-dimension Gamma draws.
    for (int n = 0; n < numSamples; n++)
    {
        final Vector sample = VectorFactory.getDenseDefault().createVector(dimensionality);
        double total = 0.0;
        for (int i = 0; i < dimensionality; i++)
        {
            final double draw = gammaData[i][n];
            sample.set(i, draw);
            total += draw;
        }
        // Guard against a degenerate all-zero draw before normalizing.
        if (total != 0.0)
        {
            sample.scaleEquals(1.0 / total);
        }
        output.add(sample);
    }
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:35,代码来源:DirichletDistribution.java

示例12: updateInitialProbabilities

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Updates the initial probabilities from sequenceGammas
 * @param firstGammas
 * The first gamma of the each sequence
 * @return
 * Updated initial probability Vector for the HMM.
 */
protected Vector updateInitialProbabilities(
    ArrayList<Vector> firstGammas )
{
    // Sum the first gamma of every sequence, then renormalize the total
    // into a probability distribution.
    final RingAccumulator<Vector> accumulator = new RingAccumulator<Vector>();
    for( Vector firstGamma : firstGammas )
    {
        accumulator.accumulate( firstGamma );
    }
    final Vector initialProbabilities = accumulator.getSum();
    initialProbabilities.scaleEquals( 1.0 / initialProbabilities.norm1() );
    return initialProbabilities;
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:20,代码来源:BaumWelchAlgorithm.java

示例13: computeBackwardProbabilities

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Computes the backward probability recursion.
 * @param beta
 * Beta from the "next" time step.
 * @param b
 * Observation likelihood from the "next" time step.
 * @param weight
 * Weight to use for the current time step.
 * @return
 * Beta for the previous time step, weighted by "weight".
 */
protected WeightedValue<Vector> computeBackwardProbabilities(
    Vector beta,
    Vector b,
    double weight )
{
    // beta' = (b .* beta) * A : emission weighting, then reverse transition.
    Vector betaPrevious = b.dotTimes(beta);
    betaPrevious = betaPrevious.times( this.getTransitionProbability() );

    // Skip the scale entirely when the weight is the identity.
    if( weight != 1.0 )
    {
        betaPrevious.scaleEquals(weight);
    }
    return new DefaultWeightedValue<Vector>( betaPrevious, weight );
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:25,代码来源:HiddenMarkovModel.java

示例14: evaluate

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
@Override
public Vector evaluate(
    Vector input)
{
    // Decay the previous state by a factor of 0.9, add the input, and store
    // the result back as the new state before returning it.
    final Vector state = this.getState();
    state.scaleEquals(0.9);
    state.plusEquals(input);
    this.setState(state);
    return state;
}
 
开发者ID:algorithmfoundry,项目名称:Foundry,代码行数:11,代码来源:ExtendedKalmanFilterTest.java

示例15: WineDataset

import gov.sandia.cognition.math.matrix.Vector; //导入方法依赖的package包/类
/**
 * Loads the wine dataset from wine.data
 * 
 * @param normalise
 *            whether to mean center the dataset
 * @param clusters
 *            valid clusters, if empty all clusters are chosen
 */
public WineDataset(boolean normalise, Integer... clusters) {
	final BufferedReader br = new BufferedReader(
			new InputStreamReader(WineDataset.class.getResourceAsStream("wine.data")));
	String line = null;
	Vector mean = null;
	Set<Integer> clusterSet = null;
	if (clusters.length != 0) {
		clusterSet = new HashSet<Integer>();
		clusterSet.addAll(Arrays.asList(clusters));
	}

	try {
		while ((line = br.readLine()) != null) {
			final String[] parts = line.split(",");
			final int cluster = Integer.parseInt(parts[0].trim());
			if (clusterSet != null && !clusterSet.contains(cluster))
				continue;
			final double[] data = new double[parts.length - 1];
			for (int i = 0; i < data.length; i++) {
				data[i] = Double.parseDouble(parts[i + 1]);
			}

			ListDataset<double[]> ds = this.get(cluster);
			if (ds == null)
				this.put(cluster, ds = new ListBackedDataset<double[]>());
			ds.add(data);
			final Vector copyArray = VectorFactory.getDefault().copyArray(data);
			if (mean == null) {
				mean = copyArray.clone();
			}
			else {
				mean.plusEquals(copyArray);
			}
		}
		mean.scaleEquals(1. / this.numInstances());
		if (normalise) {
			normalise(mean);
		}
	} catch (final Exception e) {
		logger.error("Wine dataset failed to load", e);
	}
}
 
开发者ID:openimaj,项目名称:openimaj,代码行数:51,代码来源:WineDataset.java


注:本文中的gov.sandia.cognition.math.matrix.Vector.scaleEquals方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。