

Java Vector.dotProduct Method Code Examples

This article collects typical usage examples of the Java method gov.sandia.cognition.math.matrix.Vector.dotProduct. If you are wondering what Vector.dotProduct does, how to call it, or what real code that uses it looks like, the selected examples below should help. You can also look further into other usage examples of the enclosing class, gov.sandia.cognition.math.matrix.Vector.


The following presents 15 code examples of the Vector.dotProduct method, sorted by popularity by default.
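Before the examples, here is a minimal, self-contained sketch of a dotProduct call. The vector values are made up for illustration, and the class name DotProductSketch is hypothetical; Vector3 is the small fixed-size vector class from the Foundry's mtj package that several of the tests below also use.

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;

public class DotProductSketch
{
    public static void main(String[] args)
    {
        // Two small made-up vectors.
        Vector a = new Vector3(1.0, 2.0, 3.0);
        Vector b = new Vector3(4.0, -1.0, 0.5);

        // dotProduct computes the inner product: 1*4 + 2*(-1) + 3*0.5 = 3.5
        double dot = a.dotProduct(b);
        System.out.println(dot); // 3.5
    }
}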

Example 1: evaluate

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Evaluates the kernel on the given inputs by first applying the vector
 * function to each input vector and then evaluating the kernel on the
 * results of the vector function. If no kernel is specified then the 
 * linear kernel (dot product) is used.
 *
 * @param  x The first item.
 * @param  y The second item.
 * @return The kernel evaluated on the two given objects.
 */
public double evaluate(
    final Vectorizable x, 
    final Vectorizable y)
{
    final Vector fx = this.function.evaluate(x.convertToVector());
    final Vector fy = this.function.evaluate(y.convertToVector());
    
    if ( this.kernel == null )
    {
        return fx.dotProduct(fy);
    }
    else
    {
        return this.kernel.evaluate(fx, fy);
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 27, Source: VectorFunctionKernel.java

Example 2: evaluateAsGaussian

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
public UnivariateGaussian evaluateAsGaussian(
    final Vectorizable input)
{
    if (!this.isInitialized())
    {
        // Variance is not yet initialized.
        return new UnivariateGaussian();
    }
    else
    {
        final Vector x = input.convertToVector();
        return new UnivariateGaussian(
            this.evaluateAsDouble(x),
            x.dotProduct(x.dotTimes(this.getVariance())));
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 18, Source: DiagonalConfidenceWeightedBinaryCategorizer.java
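The variance term above is x' diag(variance) x, expressed through an element-wise product. A standalone sketch of just that computation, with made-up values (the class name DiagonalVarianceSketch is illustrative, not part of the Foundry):

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;

public class DiagonalVarianceSketch
{
    public static void main(String[] args)
    {
        Vector x = new Vector3(1.0, 2.0, 3.0);
        // Made-up per-dimension variances (the diagonal of the covariance).
        Vector variance = new Vector3(0.5, 0.25, 1.0);

        // x . (x elementwise* variance) = sum_i x_i^2 * variance_i
        // = 1*0.5 + 4*0.25 + 9*1.0 = 10.5
        double predictiveVariance = x.dotProduct(x.dotTimes(variance));
        System.out.println(predictiveVariance); // 10.5
    }
}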

Example 3: evaluate

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Evaluates this function on the provided cluster.
 *
 * @param cluster The cluster to calculate the function on.
 * @return The result of applying this function to the cluster.
 */
public double evaluate(NormalizedCentroidCluster<V> cluster)
{
    double total = 1.0;

    Vector centroid = cluster.getCentroid().convertToVector();
    Vector normalizedCentroid
        = cluster.getNormalizedCentroid().convertToVector();

    // If the centroid is the zero vector, the cosine term is treated as 0.0.
    if (centroid.norm2() != 0.0)
    {
        total -= centroid.dotProduct(normalizedCentroid) / centroid.norm2();
    }

    total *= cluster.getMembers().size();

    return total;
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 25, Source: WithinNormalizedCentroidClusterCosineDivergence.java

Example 4: SufficientStatistic

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Creates a new instance of SufficientStatistic
 * @param prior
 * Prior on the weights
 */
public SufficientStatistic(
    MultivariateGaussianInverseGammaDistribution prior )
{
    super();

    if( prior != null )
    {
        Vector mean = prior.getMean();
        this.covarianceInverse =
            prior.getGaussian().getCovarianceInverse().clone();
        this.z = this.covarianceInverse.times( mean );

        double a0 = prior.getInverseGamma().getShape();
        double b0 = prior.getInverseGamma().getScale();
        this.count = (long) Math.ceil(2.0*a0);
        this.outputSumSquared = 2.0*b0 + mean.dotProduct(this.z);
    }
    else
    {
        this.covarianceInverse = null;
        this.z = null;
        this.count = 0;
        this.outputSumSquared = 0.0;
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 31, Source: BayesianRobustLinearRegression.java

Example 5: testEvaluate

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Test of evaluate method, of class gov.sandia.cognition.learning.kernel.WeightedKernel.
 */
public void testEvaluate()
{
    Vector zero = new Vector3();
    Vector x = new Vector3(RANDOM.nextGaussian(), RANDOM.nextGaussian(), RANDOM.nextGaussian());
    Vector y = new Vector3(RANDOM.nextGaussian(), RANDOM.nextGaussian(), RANDOM.nextGaussian());
    
    double weight = RANDOM.nextDouble();
    WeightedKernel<Vector> instance = new WeightedKernel<Vector>(
        weight, LinearKernel.getInstance());
    
    double expected = weight * x.dotProduct(y);
    assertEquals(expected, instance.evaluate(x, y));
    assertEquals(expected, instance.evaluate(y, x));
    assertEquals(0.0, instance.evaluate(x, zero));
    assertEquals(0.0, instance.evaluate(y, zero));
    assertEquals(0.0, instance.evaluate(zero, zero));
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 21, Source: WeightedKernelTest.java

Example 6: testEvaluate

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Test of evaluate method, of class LinearDiscriminantWithBias.
 */
public void testEvaluate()
{
    System.out.println("evaluate");
    LinearDiscriminantWithBias instance = this.createInstance();

    final int N = instance.getInputDimensionality();
    Vector x = VectorFactory.getDefault().createVector(N, 0.0);
    double result = instance.evaluate(x);
    assertEquals( instance.getBias(), result );

    x = VectorFactory.getDefault().createUniformRandom(N, -1.0, 1.0,RANDOM);
    result = instance.evaluate(x);
    double expected = x.dotProduct( instance.getWeightVector() ) + instance.getBias();
    assertEquals( expected, result );
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 19, Source: LinearDiscriminantWithBiasTest.java

Example 7: evaluate

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Evaluates the cosine distance between the two given vectors.
 * 
 * @param  first The first Vector.
 * @param  second The second Vector.
 * @return The cosine distance between the two given vectors.
 */
public double evaluate(
    final Vectorizable first,
    final Vectorizable second)
{
    // Ideally, we would just do:
    // return 1.0 - first.convertToVector().cosine(second.convertToVector());
    // But that has a problem: the zero vector would not count as similar to itself.
    // Thus, we manually decompose the cosine so that we can make that check.
    final Vector firstVector = first.convertToVector();
    final Vector secondVector = second.convertToVector();
    final double dotProduct = firstVector.dotProduct(secondVector);
    final double firstNormSquared = firstVector.norm2Squared();
    final double secondNormSquared = secondVector.norm2Squared();

    if (dotProduct == 0.0)
    {
        if (firstNormSquared == 0.0 && secondNormSquared == 0.0)
        {
            // Zero vectors are similar to themselves.
            return 0.0;
        }
        else
    {
        // The cosine would be 0.0, so the result is 1.0.
            return 1.0;
        }
    }
    else
    {
        // Compute the actual cosine.
        final double cosine = 
            dotProduct / Math.sqrt(firstNormSquared * secondNormSquared);

        // Change it from a similarity to a divergence.
        return 1.0 - cosine;
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 46, Source: CosineDistanceMetric.java
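For a concrete feel of that decomposition, here is a standalone sketch with made-up vectors (the class name CosineDistanceSketch is illustrative); for non-zero inputs the manual formula agrees with the built-in Vector.cosine mentioned in the comment above.

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;

public class CosineDistanceSketch
{
    public static void main(String[] args)
    {
        Vector x = new Vector3(1.0, 0.0, 1.0);
        Vector y = new Vector3(0.0, 1.0, 1.0);

        // Manual decomposition: 1 - (x . y) / sqrt(||x||^2 * ||y||^2)
        double manual = 1.0 - x.dotProduct(y)
            / Math.sqrt(x.norm2Squared() * y.norm2Squared());

        // For non-zero vectors this agrees with the built-in cosine.
        double viaCosine = 1.0 - x.cosine(y);

        System.out.println(manual);    // 0.5 (the angle between x and y is 60 degrees)
        System.out.println(viaCosine); // 0.5
    }
}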

Example 8: evaluateAsDouble

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * A convenience method for evaluating a Vector object as a double, thus
 * avoiding the convertToVector call from Vectorizable. It calculates:
 *
 *     weights * input + bias
 *
 * @param   input
 *      The input value to convert to a vector.
 * @return
 *      The result of taking the dot product of the weight vector with the
 *      given vector and adding the bias. If the weight vector is null, the
 *      bias is returned. The sign is treated as the categorization.
 */
public double evaluateAsDouble(
    final Vector input)
{
    if (this.weights == null)
    {
        // In the case the weights are uninitialized the result is the bias.
        return this.bias;
    }
    else
    {
        return input.dotProduct(this.weights) + this.bias;
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 27, Source: LinearBinaryCategorizer.java

Example 9: evaluateAsDouble

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * A convenience method for evaluating a Vector object as a double, thus
 * avoiding the convertToVector call from Vectorizable. It calculates:
 *
 *     weights * input + bias
 *
 * @param   input
 *      The input value to convert to a vector.
 * @return
 *      The result of taking the dot product of the weight vector with the
 *      given vector and adding the bias. If the weight vector is null, the
 *      bias is returned.
 */
public double evaluateAsDouble(
    final Vector input)
{
    if (this.weights == null)
    {
        // In the case the weights are uninitialized the result is the bias.
        return this.bias;
    }
    else
    {
        return input.dotProduct(this.weights) + this.bias;
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 27, Source: LinearVectorScalarFunction.java
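Examples 8 and 9 compute the same score, weights * input + bias. A minimal standalone sketch with made-up weights, bias, and input (the class name LinearScoreSketch is illustrative only):

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;

public class LinearScoreSketch
{
    public static void main(String[] args)
    {
        // Made-up weight vector and bias.
        Vector weights = new Vector3(0.5, -1.0, 2.0);
        double bias = 0.25;

        Vector input = new Vector3(2.0, 1.0, 1.0);

        // weights * input + bias = 0.5*2 + (-1)*1 + 2*1 + 0.25 = 2.25
        double score = input.dotProduct(weights) + bias;

        // The sign is the categorization: positive here.
        System.out.println(score);       // 2.25
        System.out.println(score > 0.0); // true
    }
}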

Example 10: computeScaleFactor

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    Vector direction = this.lineFunction.getDirection();
    
    Vector deltaGradient = gradientCurrent.minus( gradientPrevious );
    double deltaTgradient = deltaGradient.dotProduct( gradientCurrent );
    double denom = gradientPrevious.dotProduct( direction );
    double beta = -deltaTgradient / denom;
    return beta;
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 14, Source: FunctionMinimizerLiuStorey.java

Example 11: computeScaleFactor

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    Vector deltaGradient = gradientCurrent.minus( gradientPrevious );
    double deltaTgradient = deltaGradient.dotProduct( gradientCurrent );
    double denom = gradientPrevious.norm2Squared();
    
    double beta = deltaTgradient / denom;
    return beta;
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 12, Source: FunctionMinimizerPolakRibiere.java
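Examples 10 and 11 differ only in the denominator of the conjugate-gradient scale factor: Liu-Storey divides by the previous gradient dotted with the search direction, Polak-Ribiere by the squared norm of the previous gradient. A standalone sketch of both formulas with made-up gradients and direction (the class name ScaleFactorSketch is illustrative):

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;

public class ScaleFactorSketch
{
    public static void main(String[] args)
    {
        // Made-up current/previous gradients and search direction.
        Vector gradientCurrent  = new Vector3(1.0, -2.0, 0.5);
        Vector gradientPrevious = new Vector3(2.0, -1.0, 1.0);
        Vector direction        = new Vector3(-2.0, 1.0, -1.0);

        Vector deltaGradient = gradientCurrent.minus(gradientPrevious);
        double deltaTgradient = deltaGradient.dotProduct(gradientCurrent);

        // Liu-Storey: beta = -((g_k - g_{k-1}) . g_k) / (g_{k-1} . d)
        double betaLiuStorey = -deltaTgradient
            / gradientPrevious.dotProduct(direction);

        // Polak-Ribiere: beta = ((g_k - g_{k-1}) . g_k) / ||g_{k-1}||^2
        double betaPolakRibiere = deltaTgradient
            / gradientPrevious.norm2Squared();

        System.out.println(betaLiuStorey);    // 0.125 for these made-up values
        System.out.println(betaPolakRibiere); // 0.125 for these made-up values
    }
}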

Example 12: evaluate

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
public StudentTDistribution evaluate(
    Vectorizable input)
{
    Vector x = input.convertToVector();
    double mean = x.dotProduct( this.posterior.getMean() );
    double dofs = this.posterior.getInverseGamma().getShape() * 2.0;
    double v = x.times( this.posterior.getGaussian().getCovariance() ).dotProduct(x);
    double anbn = this.posterior.getInverseGamma().getShape() / this.posterior.getInverseGamma().getScale();
    double precision = anbn / (1.0 + v);
    return new StudentTDistribution( dofs, mean, precision );
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 13, Source: BayesianRobustLinearRegression.java

Example 13: testEvaluate

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Test of evaluate method, of class gov.sandia.cognition.learning.kernel.VectorFunctionKernel.
 */
public void testEvaluate()
{
    LinearVectorFunction function = new LinearVectorFunction(RANDOM.nextGaussian());
    VectorFunctionKernel instance = new VectorFunctionKernel(function);
    
    PolynomialKernel kernel = new PolynomialKernel(4, RANDOM.nextDouble());
    
    int count = 10;
    for (int i = 0; i < count; i++)
    {
        Vector x = new Vector3(RANDOM.nextGaussian(), RANDOM.nextGaussian(), RANDOM.nextGaussian());
        Vector y = new Vector3(RANDOM.nextGaussian(), RANDOM.nextGaussian(), RANDOM.nextGaussian());
    
        Vector fx = function.evaluate(x);
        Vector fy = function.evaluate(y);
        
        instance.setKernel(null);
        double expected = fx.dotProduct(fy);
        assertEquals(expected, instance.evaluate(x, y));
        assertEquals(expected, instance.evaluate(y, x));
        
        instance.setKernel(kernel);
        expected = kernel.evaluate(fx, fy);
        
        assertEquals(expected, instance.evaluate(x, y));
        assertEquals(expected, instance.evaluate(y, x));
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 32, Source: VectorFunctionKernelTest.java

Example 14: testLearn

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Test of learn method, of class SimplifiedSequentialMinimalOptimization.
 */
public void testLearn()
{
    // Generate some data using the example synthetic data from Platt's
    // original SMO paper.
    int d = 300;
    int pointsToGenerate = 100;
    final ArrayList<InputOutputPair<Vector, Boolean>> data =
        new ArrayList<InputOutputPair<Vector, Boolean>>(pointsToGenerate);

    Vector target = VectorFactory.getDenseDefault().createUniformRandom(d, -1.0, 1.0, random);
    while (data.size() < pointsToGenerate)
    {
        Vector input = VectorFactory.getSparseDefault().createVector(d, 0.0);

        for (int i = 0; i < d / 10; i++)
        {
            int index = random.nextInt(d);
            input.setElement(index, 1.0);
        }

        double dotProduct = input.dotProduct(target);
        if (dotProduct < -1.0)
        {
            data.add(DefaultInputOutputPair.create(input, false));
        }
        else if (dotProduct > +1.0)
        {
            data.add(DefaultInputOutputPair.create(input, true));
        }
        // else - The dot product was between -1.0 and +1.0, try again.
    }

    SequentialMinimalOptimization<Vector> instance =
        new SequentialMinimalOptimization<Vector>();
    instance.setKernel(new LinearKernel());
    instance.setRandom(random);
    instance.setMaxIterations(1000);
    instance.setMaxPenalty(100.0);
    instance.setKernelCacheSize(0);

    final KernelBinaryCategorizer<Vector, ?> result = instance.learn(data);
    assertSame(result, instance.getResult());

    for (InputOutputPair<Vector, Boolean> example : data)
    {
        // System.out.println("" + example.getInput() + " -> " + example.getOutput());
        assertEquals(example.getOutput(), result.evaluate(example.getInput()));
    }
}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 54, Source: SequentialMinimalOptimizationTest.java

Example 15: update

import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Perform an update for the target using the given input and associated
 * label.
 *
 * @param   target
 *      The target to update.
 * @param   input
 *      The input value.
 * @param   label
 *      The label associated with the input.
 */
public void update(
    final DefaultConfidenceWeightedBinaryCategorizer target,
    final Vector input,
    final boolean label)
{
    // Get the mean and covariance of the categorizer being learned; these
    // are the parameters we will update.
    final Vector mean;
    final Matrix covariance;
    if (!target.isInitialized())
    {
        // Initialize the mean to zero and the variance to the default value
        // that we were given.
        final int dimensionality = input.getDimensionality();
        mean = VectorFactory.getDenseDefault().createVector(dimensionality);
        covariance = MatrixFactory.getDenseDefault().createIdentity(
            dimensionality, dimensionality);

        target.setMean(mean);
        target.setCovariance(covariance);
    }
    else
    {
        mean = target.getMean();
        covariance = target.getCovariance();
    }

    // Compute the predicted and actual values.
    final double predicted = input.dotProduct(mean);
    final double actual = label ? +1.0 : -1.0;

    // Now compute the margin (m_t) and variance (v_t).
    final double margin = actual * predicted;
    
    final boolean error = margin < 1.0;
    if (error)
    {
        final Vector covarianceTimesInput = input.times(covariance);
        final double marginVariance = covarianceTimesInput.dotProduct(input);
        
        final double beta = 1.0 / (marginVariance + this.r);
        final double alpha = Math.max(0.0, 1.0 - margin) * beta;
        
        final Vector meanUpdate = input.times(covariance);
        meanUpdate.scaleEquals(alpha * actual);
        mean.plusEquals(meanUpdate);
        
        final Matrix covarianceUpdate = covarianceTimesInput.outerProduct(
            covarianceTimesInput);
        covarianceUpdate.scaleEquals(-beta);
        covariance.plusEquals(covarianceUpdate);
    }

}
 
Developer ID: algorithmfoundry, Project: Foundry, Lines: 66, Source: AdaptiveRegularizationOfWeights.java
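To isolate the two dot products in that update, here is a sketch of just the pre-update quantities: the margin m_t = label * (input . mean) and the margin variance v_t = input' Sigma input, using an identity covariance and made-up values (the class name ArowQuantitiesSketch is illustrative):

import gov.sandia.cognition.math.matrix.Matrix;
import gov.sandia.cognition.math.matrix.MatrixFactory;
import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;

public class ArowQuantitiesSketch
{
    public static void main(String[] args)
    {
        // Made-up example: current mean, identity covariance, one labeled input.
        Vector mean = new Vector3(0.5, -0.5, 1.0);
        Matrix covariance = MatrixFactory.getDenseDefault().createIdentity(3, 3);
        Vector input = new Vector3(1.0, 1.0, 0.0);
        boolean label = true;

        double predicted = input.dotProduct(mean);          // 0.5 - 0.5 + 0.0 = 0.0
        double actual = label ? +1.0 : -1.0;
        double margin = actual * predicted;                 // m_t = 0.0

        Vector covarianceTimesInput = input.times(covariance);
        double marginVariance = covarianceTimesInput.dotProduct(input); // v_t = 2.0

        System.out.println(margin < 1.0);   // true: the update would fire
        System.out.println(marginVariance); // 2.0
    }
}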


Note: The gov.sandia.cognition.math.matrix.Vector.dotProduct method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce this article without permission.