This page collects typical usage examples of the C# Vector.Sum method. If you have been wondering what Vector.Sum does in C#, how to call it, or what it looks like in real code, the hand-picked method examples below may help. You can also read more about the containing class, Vector, and its other usage examples.
Nine code examples of Vector.Sum are shown below, ordered by popularity by default.
Example 1: Compute
public double Compute(Vector x, Vector y)
{
    if (x.Length != y.Length)
        throw new InvalidOperationException("Cannot compute similarity between two unequally sized Vectors!");

    var xSum = x.Sum();
    var ySum = y.Sum();

    return (x.Dot(y) - ((xSum * ySum) / x.Length)) /
           System.Math.Sqrt(((x ^ 2).Sum() - (System.Math.Pow(xSum, 2) / x.Length)) *
                            ((y ^ 2).Sum() - (System.Math.Pow(ySum, 2) / y.Length)));
}
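The return expression is the textbook Pearson correlation written in terms of running sums. As a stand-alone sketch of the same arithmetic using plain double arrays and LINQ (no library Vector type assumed), it could look like this:

using System;
using System.Linq;

static double PearsonSimilarity(double[] x, double[] y)
{
    if (x.Length != y.Length)
        throw new InvalidOperationException("Cannot compute similarity between two unequally sized arrays!");
    int n = x.Length;
    double xSum = x.Sum();
    double ySum = y.Sum();
    double dot = x.Zip(y, (a, b) => a * b).Sum();
    double xSqSum = x.Sum(a => a * a);
    double ySqSum = y.Sum(a => a * a);
    // Same numerator and denominator as the Compute method above
    return (dot - xSum * ySum / n) /
           Math.Sqrt((xSqSum - xSum * xSum / n) * (ySqSum - ySum * ySum / n));
}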
Example 2: Integrate
// Integrates the RR signal: removes the mean, accumulates a running sum, and takes absolute values.
public Vector<double> Integrate(Vector<double> signal_rr)
{
    Vector<double> signal_integrated = Vector<double>.Build.Dense(signal_rr.Count(), 0);
    // Average RR interval
    double rr_avg = signal_rr.Sum() / signal_rr.Count;
    for (int i = 0; i < signal_rr.Count - 1; i++)
    {
        signal_integrated[0] = 0;
        signal_integrated[i + 1] = signal_rr[i] - rr_avg;
        signal_integrated[i + 1] += signal_integrated[i];
        signal_integrated[i + 1] = Math.Abs(signal_integrated[i + 1]);
    }
    return signal_integrated;
}
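A possible call site, assuming MathNet.Numerics for the Vector<double> type; the RR interval values below (in seconds) are made up purely for illustration:

using MathNet.Numerics.LinearAlgebra;

// Hypothetical RR intervals in seconds
var rr = Vector<double>.Build.DenseOfArray(new[] { 0.80, 0.82, 0.79, 0.85, 0.81 });
Vector<double> integrated = Integrate(rr); // mean-removed, accumulated, absolute-valued signal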
Example 3: CalculateGradientForMean
/// <summary>
/// Helper function to calculate the gradient of the KL divergence with respect to the mean of the Dirichlet.
/// </summary>
/// <param name="meanPseudoCount">Pseudocount vector of the incoming message from 'mean'</param>
/// <param name="totalCount">Incoming message from 'totalCount'</param>
/// <param name="meanLogProb">E[log(prob)]</param>
/// <returns>Gradient of the KL divergence with respect to the mean of the Dirichlet</returns>
internal static Vector CalculateGradientForMean(Vector meanPseudoCount, Gamma totalCount, Vector meanLogProb)
{
    // Compute required integrals
    double[] EELogGamma;
    double[] EELogMLogGamma;
    double[] EELogOneMinusMLogGamma;
    MeanMessageExpectations(
        meanPseudoCount,
        totalCount,
        out EELogGamma,
        out EELogMLogGamma,
        out EELogOneMinusMLogGamma);

    // Calculate gradients of ELogGamma(sm)
    int K = meanPseudoCount.Count;
    double meanTotalCount = meanPseudoCount.Sum();
    Vector ELogM = Vector.Zero(K);
    Vector B = Vector.Zero(K);
    Vector A = Vector.Zero(K);
    ELogM.SetToFunction(meanPseudoCount, MMath.Digamma);
    ELogM.SetToDifference(ELogM, MMath.Digamma(meanTotalCount));
    for (int k = 0; k < K; k++) {
        A[k] = EELogMLogGamma[k] - ELogM[k] * EELogGamma[k];
        double ELogOneMinusM = MMath.Digamma(meanTotalCount - meanPseudoCount[k])
                               - MMath.Digamma(meanTotalCount);
        B[k] = EELogOneMinusMLogGamma[k] - ELogOneMinusM * EELogGamma[k];
    }
    Vector gradC = A - B + B.Sum();

    // Calculate gradients of the analytic part
    double sum = 0;
    for (int k = 0; k < K; k++)
        sum += meanPseudoCount[k] * meanLogProb[k];
    Vector gradS = Vector.Constant(K, -sum / (meanTotalCount * meanTotalCount));
    for (int k = 0; k < K; k++)
        gradS[k] += meanLogProb[k] / meanTotalCount;
    gradS *= totalCount.GetMean();
    gradS -= gradC;
    return gradS;
}
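The two SetTo* calls that build ELogM rely on the standard Dirichlet expectations, which may help when reading the loop: for m ~ Dirichlet(α) with α₀ = Σⱼ αⱼ,

E[log m_k] = ψ(α_k) − ψ(α₀),    E[log(1 − m_k)] = ψ(α₀ − α_k) − ψ(α₀),

where ψ is the digamma function (MMath.Digamma); the second identity is exactly the ELogOneMinusM term computed inside the loop.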
Example 4: AverageLogFactor
/// <summary>
/// Evidence message for VMP
/// </summary>
/// <param name="prob">Incoming message from 'prob'. Must be a proper distribution. If any element is uniform, the result will be uniform.</param>
/// <param name="mean">Constant value for 'mean'.</param>
/// <param name="totalCount">Incoming message from 'totalCount'. Must be a proper distribution. If uniform, the result will be uniform.</param>
/// <returns>Average of the factor's log-value across the given argument distributions</returns>
/// <remarks><para>
/// The formula for the result is <c>sum_(prob,totalCount) p(prob,totalCount) log(factor(prob,mean,totalCount))</c>.
/// Adding up these values across all factors and variables gives the log-evidence estimate for VMP.
/// </para></remarks>
/// <exception cref="ImproperMessageException"><paramref name="prob"/> is not a proper distribution</exception>
/// <exception cref="ImproperMessageException"><paramref name="totalCount"/> is not a proper distribution</exception>
public static double AverageLogFactor([SkipIfUniform] Dirichlet prob, Vector mean, [SkipIfUniform] Gamma totalCount)
{
    double totalCountMean = totalCount.GetMean();
    Vector probMeanLog = prob.GetMeanLog();
    double sum = GammaFromShapeAndRateOp.ELogGamma(totalCount);
    Gamma smk = new Gamma(totalCount);
    sum += probMeanLog.Inner(mean, x => totalCountMean * x - 1.0);
    sum += mean.Sum(x => { smk.Rate = totalCount.Rate / x; return -GammaFromShapeAndRateOp.ELogGamma(smk); });
    return sum;
}
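For reference, the log factor being averaged is the log Dirichlet density with pseudocounts s·m (s = totalCount, m = mean):

log Dir(p | s·m) = log Γ(s) − Σ_k log Γ(s·m_k) + Σ_k (s·m_k − 1) log p_k.

The ELogGamma(totalCount) term supplies E[log Γ(s)], the Inner call accumulates Σ_k (E[s]·m_k − 1)·E[log p_k], and the mean.Sum(...) term supplies −Σ_k E[log Γ(s·m_k)] by exploiting the Gamma scaling property: if s has rate b, then m_k·s has the same shape with rate b/m_k, which is what the smk.Rate assignment sets up.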
Example 5: TotalCountMessageExpectations
/// <summary>
/// Perform the quadrature required for the Nonconjugate VMP message to 'totalCount'
/// </summary>
/// <param name="meanQPseudoCount">Incoming message from 'mean'.</param>
/// <param name="totalCountQ">Incoming message from 'totalCount'.</param>
/// <param name="EELogGamma">Array to be filled with E[LogGamma(s*m_k)].</param>
/// <param name="EELogSLogGamma">Array to be filled with E[Log(s)*LogGamma(s*m_k)].</param>
/// <param name="EEMSDigamma">Array to be filled with E[s*m_k*Digamma(s*m_k)].</param>
/// <remarks><para>
/// All three arrays are calculated simultaneously for efficiency. The quadrature over
/// 'totalCount' (which is Gamma-distributed) is performed by a change of variable x=log(s)
/// followed by Gauss-Hermite quadrature. The quadrature over m is performed using
/// Gauss-Legendre.
/// </para></remarks>
public static void TotalCountMessageExpectations(
    Vector meanQPseudoCount,
    Gamma totalCountQ,
    out double[] EELogGamma,
    out double[] EELogSLogGamma,
    out double[] EEMSDigamma)
{
    // Get shape and rate of the distribution
    double at = totalCountQ.Shape, bt = totalCountQ.Rate;
    // Mean in the transformed domain
    double proposalMean = totalCountQ.GetMeanLog();
    // Laplace approximation of variance in transformed domain
    double proposalVariance = 1 / at;
    // Quadrature coefficient
    int nt = 32;
    Vector nodes = Vector.Zero(nt);
    Vector weights = Vector.Zero(nt);
    Vector expx = Vector.Zero(nt);
    if (!totalCountQ.IsPointMass) {
        Quadrature.GaussianNodesAndWeights(proposalMean, proposalVariance, nodes, weights);
        // Precompute weights for each m slice
        for (int i = 0; i < nt; i++) {
            double x = nodes[i];
            expx[i] = Math.Exp(x);
            double p = at * x - bt * expx[i] - Gaussian.GetLogProb(x, proposalMean, proposalVariance);
            weights[i] *= Math.Exp(p);
        }
    }
    int nm = 20;
    Vector mnodes = Vector.Zero(nm);
    Vector mweight = Vector.Zero(nm);
    Quadrature.UniformNodesAndWeights(0, 1, mnodes, mweight);
    int K = meanQPseudoCount.Count;
    Vector[] mweights = new Vector[K];
    Beta[] mkDist = new Beta[K];
    EELogGamma = new double[K];
    EELogSLogGamma = new double[K];
    EEMSDigamma = new double[K];
    double meanQTotalCount = meanQPseudoCount.Sum();
    for (int i = 0; i < K; i++) {
        mweights[i] = Vector.Copy(mweight);
        mkDist[i] = new Beta(meanQPseudoCount[i], meanQTotalCount - meanQPseudoCount[i]);
        EELogGamma[i] = 0;
        EELogSLogGamma[i] = 0;
        EEMSDigamma[i] = 0;
    }
    for (int j = 0; j < nm; j++) {
        double m = mnodes[j];
        double ESDigamma = 0;
        double ELogGamma = 0;
        double ELogSLogGamma = 0;
        if (totalCountQ.IsPointMass) {
            ESDigamma = totalCountQ.Point * MMath.Digamma(m * totalCountQ.Point);
            ELogGamma = MMath.GammaLn(m * totalCountQ.Point);
            ELogSLogGamma = Math.Log(totalCountQ.Point) * ELogGamma;
        } else {
            // Calculate expectations in x=log(s) space using Gauss-Hermite quadrature
            for (int i = 0; i < nt; i++) {
                double x = nodes[i];
                ELogGamma += weights[i] * (MMath.GammaLn(m * expx[i]) + x);
                ESDigamma += weights[i] * (expx[i] * MMath.Digamma(m * expx[i]) + 1);
                ELogSLogGamma += weights[i] * (x * MMath.GammaLn(m * expx[i]) + x * x + x * Math.Log(m));
            }
            // Normalise and add removed components
            double normalisation = Math.Pow(bt, at) / MMath.Gamma(at);
            ELogGamma = normalisation * ELogGamma - proposalMean;
            ELogSLogGamma = normalisation * ELogSLogGamma
                            - (MMath.Trigamma(at) + proposalMean * proposalMean + Math.Log(m) * proposalMean);
            ESDigamma = normalisation * ESDigamma - 1;
        }
        for (int i = 0; i < K; i++) {
            mweights[i][j] *= Math.Exp(mkDist[i].GetLogProb(m));
            EELogGamma[i] += mweights[i][j] * ELogGamma;
            EELogSLogGamma[i] += mweights[i][j] * ELogSLogGamma;
            EEMSDigamma[i] += mweights[i][j] * m * ESDigamma;
        }
    }
}
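The change of variable x = log(s) used here has a simple justification: if s ~ Gamma(shape a, rate b), the density of x is

p(x) = (b^a / Γ(a)) · exp(a·x − b·e^x),

which is why each quadrature weight is multiplied by exp(a·x − b·e^x) divided by the Gaussian proposal density, and why the leftover constant b^a / Γ(a) is applied afterwards as `normalisation`.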
Example 6: MeanMessageExpectations
/// <summary>
/// Perform the quadrature required for the Nonconjugate VMP message to 'mean'
/// </summary>
/// <param name="meanQPseudoCount">Incoming message from 'mean'.</param>
/// <param name="totalCountQ">Incoming message from 'totalCount'.</param>
/// <param name="EELogGamma">Array to be filled with E[LogGamma(s*m_k)].</param>
/// <param name="EELogMLogGamma">Array to be filled with E[Log(m_k)*LogGamma(s*m_k)].</param>
/// <param name="EELogOneMinusMLogGamma">Array to be filled with E[Log(1-m_k)*LogGamma(s*m_k)].</param>
/// <remarks><para>
/// All three arrays are calculated simultaneously for efficiency. The quadrature over
/// 'totalCount' (which is Gamma-distributed) is performed by a change of variable x=log(s)
/// followed by Gauss-Hermite quadrature. The quadrature over m is performed using
/// Gauss-Legendre.
/// </para></remarks>
public static void MeanMessageExpectations(
    Vector meanQPseudoCount,
    Gamma totalCountQ,
    out double[] EELogGamma,
    out double[] EELogMLogGamma,
    out double[] EELogOneMinusMLogGamma)
{
    // Get shape and rate of the distribution
    double at, bt;
    at = totalCountQ.Shape;
    bt = totalCountQ.Rate;
    // Mean in the transformed domain
    double ELogS = totalCountQ.GetMeanLog();
    // Laplace approximation of variance in transformed domain
    double proposalVariance = 1 / at;
    // Quadrature coefficient
    int nt = 32;
    Vector nodes = Vector.Zero(nt);
    Vector weights = Vector.Zero(nt);
    Vector expx = Vector.Zero(nt);
    if (!totalCountQ.IsPointMass) {
        Quadrature.GaussianNodesAndWeights(ELogS, proposalVariance, nodes, weights);
        // Precompute weights for each m slice
        for (int i = 0; i < nt; i++) {
            double x = nodes[i];
            expx[i] = Math.Exp(x);
            double p = at * x - bt * expx[i] - Gaussian.GetLogProb(x, ELogS, proposalVariance);
            weights[i] *= Math.Exp(p);
        }
    }
    int nm = 20;
    Vector mnodes = Vector.Zero(nm);
    Vector mweight = Vector.Zero(nm);
    Quadrature.UniformNodesAndWeights(0, 1, mnodes, mweight);
    int K = meanQPseudoCount.Count;
    Vector[] mweights = new Vector[K];
    Beta[] mkDist = new Beta[K];
    EELogGamma = new double[K];
    EELogMLogGamma = new double[K];
    EELogOneMinusMLogGamma = new double[K];
    double meanQTotalCount = meanQPseudoCount.Sum();
    for (int i = 0; i < K; i++) {
        mweights[i] = Vector.Copy(mweight);
        mkDist[i] = new Beta(meanQPseudoCount[i], meanQTotalCount - meanQPseudoCount[i]);
        EELogGamma[i] = 0;
        EELogMLogGamma[i] = 0;
        EELogOneMinusMLogGamma[i] = 0;
    }
    double ES = totalCountQ.GetMean();
    double ESLogS = ELogS * ES + 1 / bt;
    for (int j = 0; j < nm; j++) {
        double m = mnodes[j];
        double ELogGamma = 0;
        if (totalCountQ.IsPointMass)
            ELogGamma = MMath.GammaLn(m * totalCountQ.Point);
        else {
            // Calculate expectations in x=log(s) space using Gauss-Hermite quadrature
            for (int i = 0; i < nt; i++)
                ELogGamma += weights[i] * (MMath.GammaLn(m * expx[i]) + nodes[i]);
            // Normalise and add removed components
            double normalisation = Math.Pow(bt, at) / MMath.Gamma(at);
            ELogGamma = normalisation * ELogGamma - ELogS;
        }
        double EELogMLogGammaTemp = Math.Log(m) * (ELogGamma + ELogS + Math.Log(m));
        double EELogOneMinusMLogGammaTemp = Math.Log(1 - m) *
            (ELogGamma - (.5 * Math.Log(2 * Math.PI) - .5 * ELogS
             - .5 * Math.Log(m) + m * ESLogS + ES * m * Math.Log(m) - ES * m));
        for (int i = 0; i < K; i++) {
            mweights[i][j] *= Math.Exp(mkDist[i].GetLogProb(m));
            EELogGamma[i] += mweights[i][j] * ELogGamma;
            EELogMLogGamma[i] += mweights[i][j] * EELogMLogGammaTemp;
            EELogOneMinusMLogGamma[i] += mweights[i][j] * EELogOneMinusMLogGammaTemp;
        }
    }
    for (int i = 0; i < K; i++)
        AddAnalyticComponent(
            mkDist[i],
            ELogS,
            ES,
            ESLogS,
//......... the rest of the code is omitted here .........
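The line `double ESLogS = ELogS * ES + 1 / bt;` uses a closed-form Gamma expectation: for s ~ Gamma(shape a, rate b),

E[s·log s] = E[s]·E[log s] + 1/b,

i.e. Cov(s, log s) = 1/b, so this cross moment needs no quadrature.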
Example 7: TotalCountAverageLogarithmHelper
/// <summary>
/// VMP message to 'totalCount'. This functionality is separated out to allow use by BetaOp.
/// </summary>
/// <param name="meanPseudoCount">Pseudocount of incoming message from 'mean'. Must be a proper distribution. If any element is uniform, the result will be uniform.</param>
/// <param name="totalCount">Incoming message from 'totalCount'. Must be a proper distribution. If uniform, the result will be uniform.</param>
/// <param name="meanLogProb">E[log(prob)] from incoming message from 'prob'. Must be a proper distribution. If any element is uniform, the result will be uniform.</param>
/// <remarks><para>
/// The outgoing message here would not be Dirichlet distributed, so we use Nonconjugate VMP, which
/// sends the approximate factor that makes the gradients of the KL divergence with respect to the variational parameters match.
/// </para></remarks>
internal static Gamma TotalCountAverageLogarithmHelper(Vector meanPseudoCount, Gamma totalCount, Vector meanLogProb)
{
    double[] EELogGamma;
    double[] EELogSLogGamma;
    double[] EEMSDigamma;
    // 2D quadrature
    TotalCountMessageExpectations(
        meanPseudoCount,
        totalCount,
        out EELogGamma,
        out EELogSLogGamma,
        out EEMSDigamma);
    double at = totalCount.Shape;
    double bt = totalCount.Rate;
    // Find required expectations using quadrature
    Vector gradElogGamma = GammaFromShapeAndRateOp.CalculateDerivatives(totalCount);
    Vector gradS = gradElogGamma;
    Vector EM = Vector.Zero(meanPseudoCount.Count);
    EM.SetToProduct(meanPseudoCount, 1.0 / meanPseudoCount.Sum());
    double c = 0;
    for (int k = 0; k < meanPseudoCount.Count; k++) {
        gradS[0] -= EELogSLogGamma[k] - totalCount.GetMeanLog() * EELogGamma[k];
        gradS[1] -= -EEMSDigamma[k] / bt;
        c += EM[k] * meanLogProb[k];
    }
    // Analytic part
    gradS[0] += c / bt;
    gradS[1] -= c * at / (bt * bt);
    Matrix mat = new Matrix(2, 2);
    mat[0, 0] = MMath.Trigamma(at);
    mat[1, 0] = mat[0, 1] = -1 / bt;
    mat[1, 1] = at / (bt * bt);
    Vector v = GammaFromShapeAndRateOp.twoByTwoInverse(mat) * gradS;
    return Gamma.FromShapeAndRate(v[0] + 1, v[1]);
}
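The 2×2 matrix assembled at the end matches the Hessian of the Gamma log-normaliser A(a, b) = log Γ(a) − a·log b with respect to (shape, rate):

∂²A/∂a² = ψ′(a),    ∂²A/∂a∂b = −1/b,    ∂²A/∂b² = a/b²,

so multiplying gradS by its inverse (twoByTwoInverse) converts the gradient of the expected log factor into the parameter increments of the outgoing Gamma message, which is the nonconjugate VMP construction described in the remarks.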
Example 8: PowerMethod
public static double PowerMethod(SquareMatrix A, double precision)
{
    Vector x = new Vector(A.Count);
    Vector y;
    x[0] = 1;
    bool key = true;
    Vector lambda = new Vector(A.Count);
    Vector candidate = new Vector(A.Count);
    int effectiveCount = 0;
    while (key)
    {
        y = A * x;
        effectiveCount = x.Length;
        for (int counter = 0; counter < x.Length; counter++)
        {
            if (Math.Abs(x[counter]) > precision)
            {
                candidate[counter] = y[counter] / x[counter];
            }
            else
            {
                candidate[counter] = 0;
                effectiveCount--;
            }
        }
        x = y / y.Norm();
        key = (candidate - lambda).Norm() > precision;
        if (key)
        {
            lambda = candidate;
            candidate = new Vector(x.Length);
        }
    }
    double result;
    if (effectiveCount > 0)
    {
        result = lambda.Sum() / (double)effectiveCount;
    }
    else
    {
        result = 0;
    }
    return result;
}
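For readers without the custom SquareMatrix and Vector types, here is a self-contained sketch of the same idea — power iteration with the dominant eigenvalue estimated as the average of the component-wise ratios y_i / x_i — written against plain arrays (the 2×2 test matrix is made up for illustration):

using System;
using System.Linq;

static double DominantEigenvalue(double[,] a, double precision = 1e-10)
{
    int n = a.GetLength(0);
    var x = new double[n];
    x[0] = 1;
    double lambda = 0;
    while (true)
    {
        // y = A * x
        var y = new double[n];
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                y[i] += a[i, j] * x[j];

        // Each ratio y_i / x_i estimates the eigenvalue on a non-negligible component
        double[] ratios = Enumerable.Range(0, n)
                                    .Where(i => Math.Abs(x[i]) > precision)
                                    .Select(i => y[i] / x[i])
                                    .ToArray();
        double candidate = ratios.Length > 0 ? ratios.Average() : 0;

        // Normalise the iterate and test for convergence of the estimate
        double norm = Math.Sqrt(y.Sum(v => v * v));
        for (int i = 0; i < n; i++) x[i] = y[i] / norm;
        if (Math.Abs(candidate - lambda) <= precision) return candidate;
        lambda = candidate;
    }
}

// e.g. DominantEigenvalue(new double[,] { { 2, 1 }, { 1, 2 } }) converges to 3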
Example 9: CenterData
/// <summary>
/// Centers data to have mean zero along axis 0. This is here because
/// nearly all linear models will want their data to be centered.
/// If sampleWeight is not null, then the weighted means of X and y
/// are zero, rather than the unweighted means.
/// </summary>
/// <param name="x"></param>
/// <param name="y"></param>
/// <param name="fitIntercept"></param>
/// <param name="normalize"></param>
/// <param name="sampleWeight"></param>
internal static CenterDataResult CenterData(
    Matrix<double> x,
    Matrix<double> y,
    bool fitIntercept,
    bool normalize = false,
    Vector<double> sampleWeight = null)
{
    Vector<double> xMean;
    Vector<double> yMean = new DenseVector(y.ColumnCount);
    Vector<double> xStd;
    if (fitIntercept)
    {
        if (x is SparseMatrix)
        {
            xMean = DenseVector.Create(x.ColumnCount, i => 0.0);
            xStd = DenseVector.Create(x.ColumnCount, i => 1.0);
        }
        else
        {
            if (sampleWeight == null)
            {
                xMean = x.MeanOfEveryColumn();
            }
            else
            {
                xMean = x.MulColumnVector(sampleWeight).SumOfEveryColumn().Divide(sampleWeight.Sum());
            }
            x = x.SubtractRowVector(xMean);
            if (normalize)
            {
                xStd = new DenseVector(x.ColumnCount);
                foreach (var row in x.RowEnumerator())
                {
                    xStd.Add(row.Item2.PointwiseMultiply(row.Item2), xStd);
                }
                xStd.MapInplace(Math.Sqrt);
                for (int i = 0; i < xStd.Count; i++)
                {
                    if (xStd[i] == 0)
                    {
                        xStd[i] = 1;
                    }
                }
                x.DivRowVector(xStd, x);
            }
            else
            {
                xStd = DenseVector.Create(x.ColumnCount, i => 1.0);
            }
        }
        if (sampleWeight == null)
        {
            yMean = y.MeanOfEveryColumn();
        }
        else
        {
            yMean = y.MulColumnVector(sampleWeight).SumOfEveryColumn() / sampleWeight.Sum();
        }
        y = y.Clone();
        y = y.SubtractRowVector(yMean);
    }
    else
    {
        xMean = DenseVector.Create(x.ColumnCount, i => 0);
        xStd = DenseVector.Create(x.ColumnCount, i => 1);
    }
    return new CenterDataResult { X = x, Y = y, xMean = xMean, yMean = yMean, xStd = xStd };
}
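A possible call site, assuming MathNet.Numerics dense types and access to this internal method; the three-row design matrix and targets below are made up for illustration:

using MathNet.Numerics.LinearAlgebra.Double;

var x = DenseMatrix.OfArray(new double[,] { { 1, 10 }, { 2, 20 }, { 3, 30 } });
var y = DenseMatrix.OfArray(new double[,] { { 1.0 }, { 2.0 }, { 3.0 } });
var centered = CenterData(x, y, fitIntercept: true, normalize: true);
// centered.X and centered.Y now have column means of zero;
// centered.xStd holds the per-column scale factors applied because normalize == true.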