This article collects typical usage examples of the Java method gov.sandia.cognition.math.matrix.Vector.minusEquals. If you are wondering what Vector.minusEquals does, how to use it, or where to find examples of it, the curated code samples below should help. You can also look further into usage examples of the enclosing class, gov.sandia.cognition.math.matrix.Vector.
Shown below are 12 code examples of Vector.minusEquals, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
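Before diving into the examples, a quick orientation: minusEquals performs an element-wise subtraction in place, mutating the vector it is called on, whereas minus returns a new vector and leaves both operands unchanged. The following minimal sketch illustrates the difference; it assumes the MTJ-backed Vector3 class (the same fixed three-dimensional implementation used in Example 7), and the values are purely illustrative.

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.mtj.Vector3;

public class MinusEqualsSketch
{
    public static void main(String[] args)
    {
        Vector a = new Vector3(3.0, 2.0, 1.0);
        Vector b = new Vector3(1.0, 1.0, 1.0);

        // In-place: a becomes (2.0, 1.0, 0.0); b is unchanged.
        a.minusEquals(b);

        // Non-mutating: c is a new vector (1.0, 0.0, -1.0); a keeps its value.
        Vector c = a.minus(b);

        System.out.println(a);
        System.out.println(c);
    }
}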
Example 1: removeClusterMember
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
public boolean removeClusterMember(
    final CentroidCluster<Vector> cluster,
    final Vector member)
{
    if (cluster.getMembers().remove(member))
    {
        final int newSize = cluster.getMembers().size();
        Vector centroid = cluster.getCentroid();
        if (newSize <= 0)
        {
            centroid.zero();
        }
        else
        {
            final Vector delta = member.minus(centroid);
            delta.scaleEquals(1.0 / newSize);
            centroid.minusEquals(delta);
        }
        return true;
    }
    else
    {
        return false;
    }
}
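A note on the arithmetic in Example 1: removing member m from a cluster of size k + 1 with centroid c gives the new centroid (c * (k + 1) - m) / k, which is algebraically the same as c - (m - c) / k. The (m - c) / k term is exactly the delta computed above, so a single minusEquals call updates the centroid in place without recomputing the mean over the remaining members.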
Example 2: computeParameterGradient
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
public Vector computeParameterGradient(
    GradientDescendable function )
{
    RingAccumulator<Vector> parameterDelta =
        new RingAccumulator<Vector>();
    double denominator = 0.0;
    for (InputOutputPair<? extends Vector, ? extends Vector> pair : this.getCostParameters())
    {
        Vector input = pair.getInput();
        Vector target = pair.getOutput();
        Vector negativeError = function.evaluate( input );
        negativeError.minusEquals( target );
        double weight = DatasetUtil.getWeight(pair);
        if (weight != 1.0)
        {
            negativeError.scaleEquals( weight );
        }
        denominator += weight;
        Matrix gradient = function.computeParameterGradient( input );
        Vector parameterUpdate = negativeError.times( gradient );
        parameterDelta.accumulate( parameterUpdate );
    }
    Vector negativeSum = parameterDelta.getSum();
    if (denominator != 0.0)
    {
        negativeSum.scaleEquals( 1.0 / denominator );
    }
    return negativeSum;
}
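Example 2 (and its partial counterpart in Example 3 below) accumulates, for each training pair, the row vector (f(x) - y) times the parameter Jacobian of f, which is the gradient of 0.5 * ||f(x) - y||^2 with respect to the parameters; dividing the weighted sum by the total weight then gives the gradient of the weighted mean cost. Building f(x) - y as a "negativeError" with an in-place minusEquals avoids allocating an extra vector per pair, and the same sign convention is used consistently, so no extra negation is needed.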
Example 3: computeParameterGradientPartial
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
public Object computeParameterGradientPartial(
    GradientDescendable function )
{
    RingAccumulator<Vector> parameterDelta =
        new RingAccumulator<Vector>();
    double denominator = 0.0;
    for (InputOutputPair<? extends Vector, ? extends Vector> pair : this.getCostParameters())
    {
        Vector input = pair.getInput();
        Vector target = pair.getOutput();
        Vector negativeError = function.evaluate( input );
        negativeError.minusEquals( target );
        double weight = DatasetUtil.getWeight(pair);
        if (weight != 1.0)
        {
            negativeError.scaleEquals( weight );
        }
        denominator += weight;
        Matrix gradient = function.computeParameterGradient( input );
        Vector parameterUpdate = negativeError.times( gradient );
        parameterDelta.accumulate( parameterUpdate );
    }
    Vector negativeSum = parameterDelta.getSum();
    return new GradientPartialSSE( negativeSum, denominator );
}
Example 4: computeParameterGradient
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Computes a forward-differences approximation to the parameter Jacobian
 * @param function
 *      Internal VectorizableVectorFunction to consider
 * @param input
 *      Input about which to estimate the Jacobian
 * @param deltaSize
 *      Size of the finite-difference unit vectors, typically ~1e-5
 * @return Forward-difference approximated Jacobian about the input
 */
public static Matrix computeParameterGradient(
    VectorizableVectorFunction function,
    Vector input,
    double deltaSize )
{
    // Compute the Jacobian approximation as a forward difference
    Vector fx = function.evaluate( input );
    int M = fx.getDimensionality();
    Vector p = function.convertToVector();
    int N = p.getDimensionality();
    Matrix J = MatrixFactory.getDefault().createMatrix( M, N );
    for (int j = 0; j < N; j++)
    {
        // Add a unit vector in the jth direction
        double v = p.getElement( j );
        p.setElement( j, v + deltaSize );
        function.convertFromVector( p );
        Vector fjx = function.evaluate( input );
        fjx.minusEquals( fx );
        fjx.scaleEquals( 1.0 / deltaSize );
        J.setColumn( j, fjx );
        p.setElement( j, v );
    }
    return J;
}
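Each column j of the Jacobian returned in Example 4 is the forward-difference quotient (f(x; p + delta * e_j) - f(x; p)) / delta, where p is the current parameter vector and e_j is the jth standard basis vector. One thing worth noting about the snippet as shown: the loop restores p after each column, but convertFromVector is last called with a perturbed p, so the function object is left holding the final perturbed parameters; a caller that keeps using the same function instance may want to call function.convertFromVector(p) once more after the loop.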
Example 5: initializeAlgorithm
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
protected boolean initializeAlgorithm()
{
    boolean retval = true;
    this.setData(ObjectUtil.cloneSmartElementsAsArrayList(this.data));
    int M = this.getNumComponents();
    int N = this.getData().iterator().next().getDimensionality();
    if( M > N )
    {
        retval = false;
        throw new IllegalArgumentException(
            "Number of EigenVectors must be <= dimension of Vectors" );
    }
    // Subtract the mean from the data
    this.mean = MultivariateStatisticsUtil.computeMean( this.getData() );
    for( Vector x : this.getData() )
    {
        x.minusEquals( this.mean );
    }
    this.components = new ArrayList<Vector>( M );
    for( int i = 0; i < M; i++ )
    {
        // Make the ith vector be the identity for the ith direction
        Vector ui = VectorFactory.getDefault().createVector( N );
        ui.setElement( i, 1.0 );
        this.components.add( ui );
    }
    this.change = 0.0;
    return retval;
}
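Two details keep the in-place centering in Example 5 safe: the data is first cloned element-by-element with ObjectUtil.cloneSmartElementsAsArrayList, so the caller's original vectors are not mutated when minusEquals subtracts the mean, and the components list is seeded with the first M standard basis vectors, giving the iterative eigenvector search an orthonormal starting point.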
Example 6: update
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
public void update(
    NormalInverseWishartDistribution prior,
    Iterable<? extends Vector> data)
{
    final int n = CollectionUtil.size(data);
    Pair<Vector,Matrix> pair =
        MultivariateStatisticsUtil.computeMeanAndCovariance(data);
    Vector sampleMean = pair.getFirst();
    Matrix sampleCovariance = pair.getSecond();
    Vector lambda = prior.getGaussian().getMean();
    double nu = prior.getCovarianceDivisor();
    int alpha = prior.getInverseWishart().getDegreesOfFreedom();
    Matrix beta = prior.getInverseWishart().getInverseScale();
    int alphahat = alpha + n;
    double nuhat = nu + n;
    Vector lambdahat = lambda.scale(nu/n);
    lambdahat.plusEquals( sampleMean );
    lambdahat.scaleEquals( n/nuhat );
    Vector delta = sampleMean;
    delta.minusEquals(lambda);
    Matrix betahat = sampleCovariance;
    if( n > 1 )
    {
        betahat.scaleEquals(n);
    }
    betahat.plusEquals(beta);
    betahat.plusEquals( delta.outerProduct(delta.scale((n*nu)/nuhat)) );
    prior.getGaussian().setMean(lambdahat);
    prior.setCovarianceDivisor(nuhat);
    prior.getInverseWishart().setDegreesOfFreedom(alphahat);
    prior.getInverseWishart().setInverseScale(betahat);
}
Author: algorithmfoundry, Project: Foundry, Lines: 41, Source: MultivariateGaussianMeanCovarianceBayesianEstimator.java
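For reference, the conjugate update in Example 6 has the standard normal-inverse-Wishart posterior form: alphahat = alpha + n, nuhat = nu + n, lambdahat = (nu * lambda + n * sampleMean) / nuhat, and betahat = beta + S + (n * nu / nuhat) * (sampleMean - lambda)(sampleMean - lambda)^T, where S is the scatter of the data about the sample mean (built here from the sample covariance). The minusEquals call forms the (sampleMean - lambda) difference by reusing the sampleMean vector in place rather than allocating a new one.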
Example 7: testRemove
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Test of remove method, of class AbstractEntropyBasedGlobalTermWeighter.
 */
@Test
public void testRemove()
{
    AbstractEntropyBasedGlobalTermWeighter instance =
        new DummyEntropyBasedGlobalTermWeighter();
    Vector expectedTermEntropiesSum = new Vector3();
    instance.add(new Vector3(3.0, 0.0, 1.0));
    instance.add(new Vector3());
    instance.add(new Vector3(0.0, 1.0, 1.0));
    expectedTermEntropiesSum.plusEquals(new Vector3(3.0 * Math.log(3.0), 0.0, 0.0));
    assertEquals(3, instance.getDocumentCount());
    assertEquals(new Vector3(1.0, 1.0, 2.0), instance.getTermDocumentFrequencies());
    assertEquals(new Vector3(3.0, 1.0, 2.0), instance.getTermGlobalFrequencies());
    assertEquals(expectedTermEntropiesSum, instance.getTermEntropiesSum());
    instance.remove(new Vector3());
    assertEquals(2, instance.getDocumentCount());
    assertEquals(new Vector3(1.0, 1.0, 2.0), instance.getTermDocumentFrequencies());
    assertEquals(new Vector3(3.0, 1.0, 2.0), instance.getTermGlobalFrequencies());
    expectedTermEntropiesSum.minusEquals(new Vector3());
    assertEquals(expectedTermEntropiesSum, instance.getTermEntropiesSum());
    instance.remove(new Vector3(3.0, 0.0, 1.0));
    assertEquals(1, instance.getDocumentCount());
    assertEquals(new Vector3(0.0, 1.0, 1.0), instance.getTermDocumentFrequencies());
    assertEquals(new Vector3(0.0, 1.0, 1.0), instance.getTermGlobalFrequencies());
    expectedTermEntropiesSum.minusEquals(new Vector3(3.0 * Math.log(3.0), 0.0, 0.0));
    assertEquals(expectedTermEntropiesSum, instance.getTermEntropiesSum());
    instance.remove(new Vector3(0.0, 1.0, 1.0));
    assertEquals(0, instance.getDocumentCount());
    assertEquals(new Vector3(), instance.getTermDocumentFrequencies());
    assertEquals(new Vector3(), instance.getTermGlobalFrequencies());
    expectedTermEntropiesSum.minusEquals(new Vector3());
    assertEquals(expectedTermEntropiesSum, instance.getTermEntropiesSum());
}
Example 8: compute
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
/**
 * Computes often-used parameters of a sum-squared error term
 * @param objectToOptimize
 *      GradientDescendable to compute the statistics of
 * @param data
 *      Dataset to consider
 * @return
 *      Cache containing the cached cost-function parameters
 */
public static Cache compute(
    GradientDescendable objectToOptimize,
    Collection<? extends InputOutputPair<? extends Vector,Vector>> data )
{
    RingAccumulator<Matrix> gradientAverage = new RingAccumulator<Matrix>();
    RingAccumulator<Vector> gradientError = new RingAccumulator<Vector>();
    // This is very close to the
    // MeanSquaredErrorCostFunction.computeParameterGradient() method
    double weightSum = 0.0;
    double parameterCost = 0.0;
    for (InputOutputPair<? extends Vector, ? extends Vector> pair : data)
    {
        // Compute the negativeError to save on Vector allocations
        // (can't use pair.getOutput because we'll alter the dataset)
        Vector negativeError = objectToOptimize.evaluate( pair.getInput() );
        negativeError.minusEquals( pair.getOutput() );
        double norm2 = negativeError.norm2Squared();
        double weight = DatasetUtil.getWeight(pair);
        if (weight != 1.0)
        {
            negativeError.scaleEquals( weight );
        }
        weightSum += weight;
        parameterCost += norm2 * weight;
        Matrix gradient =
            objectToOptimize.computeParameterGradient( pair.getInput() );
        gradientAverage.accumulate( gradient );
        gradientError.accumulate( negativeError.times( gradient ) );
    }
    weightSum *= 2.0;
    if( weightSum == 0.0 )
    {
        weightSum = 1.0;
    }
    // This is the Jacobian
    Matrix J = gradientAverage.getSum();
    J.scaleEquals( 1.0 / weightSum );
    Matrix JtJ = J.transpose().times( J );
    // Have to use 1.0 here because we've been accumulating the
    // negativeError to save Vector allocations and the chain rule
    // brings down the 2.0 from the exponent and we're already
    // hitting the function with 0.5, so it's a wash.
    Vector Jte = gradientError.getSum();
    Jte.scaleEquals( 1.0 / weightSum );
    // Make sure the cost is normalized by the weights
    parameterCost /= weightSum;
    return new Cache( J, JtJ, Jte, parameterCost );
}
Example 9: step
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
protected boolean step()
{
    // Reset the number of errors for the new iteration.
    this.setErrorCount(0);
    // Loop over all the training instances.
    for (InputOutputPair<? extends Vectorizable, ? extends Boolean> example
        : this.getData())
    {
        if (example == null)
        {
            continue;
        }
        // Compute the predicted classification and get the actual
        // classification.
        final Vector input = example.getInput().convertToVector();
        final boolean actual = example.getOutput();
        final double prediction = this.result.evaluateAsDouble(input);
        if ( (actual && prediction <= this.marginPositive)
            || (!actual && prediction >= -this.marginNegative))
        {
            // The classification was incorrect so we need to update
            // the perceptron.
            this.setErrorCount(this.getErrorCount() + 1);
            final Vector weights = this.result.getWeights();
            double bias = this.result.getBias();
            if (actual)
            {
                // Update for a positive example so add to the
                // weights and the bias.
                weights.plusEquals(input);
                bias += 1.0;
            }
            else
            {
                // Update for a negative example so subtract from
                // the weights and the bias.
                weights.minusEquals(input);
                bias -= 1.0;
            }
            // The weights are updated by side-effect.
            // Update the bias directly.
            this.result.setBias(bias);
        }
        // else - The classification was correct, no need to update.
    }
    // Keep going while the error count is positive.
    return this.getErrorCount() > 0;
}
Example 10: update
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
public void update(
    final LinearBinaryCategorizer target,
    final Vector input,
    final boolean label)
{
    Vector weights = target.getWeights();
    if (weights == null)
    {
        // This is the first example, so initialize the weight vector.
        weights = this.getVectorFactory().createVector(
            input.getDimensionality());
        target.setWeights(weights);
    }
    // else - Use the existing weights.
    // Predict the output as a double (negative values are false, positive
    // are true).
    final double prediction = target.evaluateAsDouble(input);
    final double actual = label ? +1.0 : -1.0;
    final double margin = prediction * actual;
    boolean error = false;
    if (margin <= 0.0)
    {
        // An actual mistake: Use the standard perceptron update rule.
        error = true;
    }
    else
    {
        final double weightNorm = weights.norm2();
        if (margin / weightNorm <= this.getRadius())
        {
            // This is one way to implement this. However, it is not as
            // efficient as the following way with sparse vectors, which
            // is based on the derivation:
            //     final Vector change = weights.scale(
            //         -actual * this.getRadius() / weightNorm);
            //     change.plusEquals(input);
            //     change.scaleEquals(actual);
            //     weights.plusEquals(change);
            final double scale = 1.0 - this.getRadius() / weightNorm;
            weights.scaleEquals(scale);
            error = true;
        }
        // else - No margin mistake change.
    }
    if (error)
    {
        if (label)
        {
            weights.plusEquals(input);
        }
        else
        {
            weights.minusEquals(input);
        }
    }
}
Example 11: removeClusterMember
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
public boolean removeClusterMember(
    final NormalizedCentroidCluster<Vectorizable> cluster,
    final Vectorizable member)
{
    if (cluster.getMembers().remove(member))
    {
        final int newSize = cluster.getMembers().size();
        final int oldSize = newSize + 1;
        if (newSize == 0)
        {
            cluster.setCentroid(null);
            cluster.setNormalizedCentroid(null);
            return true;
        }
        // reset centroid
        Vectorizable centroid = cluster.getCentroid();
        Vector data = centroid.convertToVector();
        data.scaleEquals(oldSize);
        data.minusEquals(member.convertToVector());
        data.scaleEquals(1.0 / newSize);
        centroid.convertFromVector(data);
        cluster.setCentroid(centroid);
        // reset normalized centroid
        Vectorizable normalizedCentroid = cluster.getNormalizedCentroid();
        Vector normalizedData = normalizedCentroid.convertToVector();
        normalizedData.scaleEquals(oldSize);
        if (member.convertToVector().norm2() != 0.0)
        {
            normalizedData.minusEquals(member.convertToVector().scale(1.0
                / member.convertToVector().norm2()));
        }
        normalizedData.scaleEquals(1.0 / newSize);
        normalizedCentroid.convertFromVector(normalizedData);
        cluster.setNormalizedCentroid(normalizedCentroid);
        return true;
    }
    else
    {
        return false;
    }
}
Example 12: evaluate
import gov.sandia.cognition.math.matrix.Vector; // import the package/class this method depends on
@Override
public Vector evaluate(
    final DataType input)
{
    final int dataSize = this.data.size();
    // Create the kernel vector.
    final Vector kernelVector =
        VectorFactory.getDenseDefault().createVector(dataSize);
    int index = 0;
    for (DataType other : this.data)
    {
        final double value = this.kernel.evaluate(input, other);
        kernelVector.setElement(index, value);
        index++;
    }
    // Transform the kernel vector, if needed.
    final Vector kInput;
    if (!this.centerData || this.kernelMatrix == null)
    {
        // In the case we don't need to center the data just use the
        // kernel vector directly.
        kInput = kernelVector;
    }
    else
    {
        // Center the input before applying the transform:
        //     Kt2 = Kt - 1'_m K - Kt 1_m + 1'_m K 1_m
        // where 1_m is an m x m matrix with 1/m on the diagonal,
        // 1'_m is an m-dimensional vector filled with 1/m,
        // m is the data size,
        // K is the original m x m kernel matrix,
        // and Kt is the m-dimensional vector of K(t, x_i).
        final Matrix centeringMatrix =
            MatrixFactory.getDiagonalDefault().createIdentity(
                dataSize, dataSize);
        centeringMatrix.scaleEquals(1.0 / dataSize);
        final Vector centeringVector =
            VectorFactory.getDenseDefault().createVector(
                dataSize, 1.0 / dataSize);
        kInput = kernelVector.clone();
        kInput.minusEquals(centeringVector.times(this.kernelMatrix));
        kInput.minusEquals(kernelVector.times(centeringMatrix));
        kInput.plusEquals(centeringVector.times(
            this.kernelMatrix.times(centeringMatrix)));
    }
    return this.components.times(kInput);
}