This article collects typical usage examples of the C# Gamma.GetMean method. If you are wondering how Gamma.GetMean is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the containing class, Gamma.
Three code examples of the Gamma.GetMean method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
Example 1: MeanMessageExpectations
/// <summary>
/// Perform the quadrature required for the Nonconjugate VMP message to 'mean'
/// </summary>
/// <param name="meanQPseudoCount">Incoming message from 'mean'.</param>
/// <param name="totalCountQ">Incoming message from 'totalCount'.</param>
/// <param name="EELogGamma">Array to be filled with E[LogGamma(s*m_k)].</param>
/// <param name="EELogMLogGamma">Array to be filled with E[Log(m_k)*LogGamma(s*m_k)].</param>
/// <param name="EELogOneMinusMLogGamma">Array to be filled with E[Log(1-m_k)*LogGamma(s*m_k)].</param>
/// <remarks><para>
/// All three arrays are calculated simultaneously for efficiency. The quadrature over
/// 'totalCount' (which is Gamma-distributed) is performed by a change of variable x=log(s)
/// followed by Gauss-Hermite quadrature. The quadrature over m is performed using
/// Gauss-Legendre.
/// </para></remarks>
public static void MeanMessageExpectations(
    Vector meanQPseudoCount,
    Gamma totalCountQ,
    out double[] EELogGamma,
    out double[] EELogMLogGamma,
    out double[] EELogOneMinusMLogGamma)
{
    // Get the shape and rate of the distribution
    double at, bt;
    at = totalCountQ.Shape;
    bt = totalCountQ.Rate;
    // Mean in the transformed domain
    double ELogS = totalCountQ.GetMeanLog();
    // Laplace approximation of variance in transformed domain
    double proposalVariance = 1 / at;
    // Quadrature coefficient
    int nt = 32;
    Vector nodes = Vector.Zero(nt);
    Vector weights = Vector.Zero(nt);
    Vector expx = Vector.Zero(nt);
    if (!totalCountQ.IsPointMass) {
        Quadrature.GaussianNodesAndWeights(ELogS, proposalVariance, nodes, weights);
        // Precompute weights for each m slice
        for (int i = 0; i < nt; i++) {
            double x = nodes[i];
            expx[i] = Math.Exp(x);
            double p = at * x - bt * expx[i] - Gaussian.GetLogProb(x, ELogS, proposalVariance);
            weights[i] *= Math.Exp(p);
        }
    }
    int nm = 20;
    Vector mnodes = Vector.Zero(nm);
    Vector mweight = Vector.Zero(nm);
    Quadrature.UniformNodesAndWeights(0, 1, mnodes, mweight);
    int K = meanQPseudoCount.Count;
    Vector[] mweights = new Vector[K];
    Beta[] mkDist = new Beta[K];
    EELogGamma = new double[K];
    EELogMLogGamma = new double[K];
    EELogOneMinusMLogGamma = new double[K];
    double meanQTotalCount = meanQPseudoCount.Sum();
    for (int i = 0; i < K; i++) {
        mweights[i] = Vector.Copy(mweight);
        mkDist[i] = new Beta(meanQPseudoCount[i], meanQTotalCount - meanQPseudoCount[i]);
        EELogGamma[i] = 0;
        EELogMLogGamma[i] = 0;
        EELogOneMinusMLogGamma[i] = 0;
    }
    double ES = totalCountQ.GetMean();
    double ESLogS = ELogS * ES + 1 / bt;
    for (int j = 0; j < nm; j++) {
        double m = mnodes[j];
        double ELogGamma = 0;
        if (totalCountQ.IsPointMass)
            ELogGamma = MMath.GammaLn(m * totalCountQ.Point);
        else {
            // Calculate expectations in x=log(s) space using Gauss-Hermite quadrature
            for (int i = 0; i < nt; i++)
                ELogGamma += weights[i] * (MMath.GammaLn(m * expx[i]) + nodes[i]);
            // Normalise and add removed components
            double normalisation = Math.Pow(bt, at) / MMath.Gamma(at);
            ELogGamma = normalisation * ELogGamma - ELogS;
        }
        double EELogMLogGammaTemp = Math.Log(m) * (ELogGamma + ELogS + Math.Log(m));
        double EELogOneMinusMLogGammaTemp = Math.Log(1 - m) *
            (ELogGamma - (.5 * Math.Log(2 * Math.PI) - .5 * ELogS
                - .5 * Math.Log(m) + m * ESLogS + ES * m * Math.Log(m) - ES * m));
        for (int i = 0; i < K; i++) {
            mweights[i][j] *= Math.Exp(mkDist[i].GetLogProb(m));
            EELogGamma[i] += mweights[i][j] * ELogGamma;
            EELogMLogGamma[i] += mweights[i][j] * EELogMLogGammaTemp;
            EELogOneMinusMLogGamma[i] += mweights[i][j] * EELogOneMinusMLogGammaTemp;
        }
    }
    for (int i = 0; i < K; i++)
        AddAnalyticComponent(
            mkDist[i],
            ELogS,
            ES,
            ESLogS,
//.........some code omitted here.........
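The remarks for MeanMessageExpectations describe the key trick: an expectation over the Gamma-distributed 'totalCount' is rewritten with the change of variable x = log(s), evaluated at Gaussian quadrature nodes with the proposal density divided back out, and scaled by the closed-form normalisation rate^shape / Gamma(shape). The following standalone sketch (not part of Infer.NET) isolates that step, assuming the same Gamma, Vector, Quadrature, Gaussian and MMath types used in the example; the using directives are an assumption and may need adjusting to your Infer.NET version. As a sanity check it recovers Gamma.GetMean() by integrating f(s) = s.

using System;
using Microsoft.ML.Probabilistic.Distributions;  // assumed namespaces; older releases
using Microsoft.ML.Probabilistic.Math;           // use MicrosoftResearch.Infer.* instead

public static class GammaQuadratureSketch
{
    // Approximates E[f(s)] for s ~ totalCountQ using the same scheme as Example 1:
    // quadrature nodes from a Gaussian proposal N(x; E[log s], 1/shape) in x = log(s),
    // importance-corrected weights, and the normalisation rate^shape / Gamma(shape).
    public static double ExpectationOverTotalCount(Gamma totalCountQ, Func<double, double> f, int nt = 32)
    {
        double at = totalCountQ.Shape, bt = totalCountQ.Rate;
        double ELogS = totalCountQ.GetMeanLog();
        double proposalVariance = 1 / at;
        Vector nodes = Vector.Zero(nt);
        Vector weights = Vector.Zero(nt);
        Quadrature.GaussianNodesAndWeights(ELogS, proposalVariance, nodes, weights);
        double sum = 0;
        for (int i = 0; i < nt; i++)
        {
            double x = nodes[i];
            double s = Math.Exp(x);
            // Swap the Gaussian proposal density for the (unnormalised) Gamma density in x-space.
            double p = at * x - bt * s - Gaussian.GetLogProb(x, ELogS, proposalVariance);
            sum += weights[i] * Math.Exp(p) * f(s);
        }
        return Math.Pow(bt, at) / MMath.Gamma(at) * sum;
    }

    public static void Demo()
    {
        Gamma totalCountQ = Gamma.FromShapeAndRate(4.0, 2.0);
        // E[s] from quadrature should agree with Gamma.GetMean() to several digits.
        Console.WriteLine("quadrature E[s] = {0}, GetMean() = {1}",
            ExpectationOverTotalCount(totalCountQ, s => s), totalCountQ.GetMean());
        // The kind of expectation Example 1 needs: E[logGamma(m*s)] for a fixed mean component m.
        double m = 0.3;
        Console.WriteLine("E[logGamma(m*s)] = {0}",
            ExpectationOverTotalCount(totalCountQ, s => MMath.GammaLn(m * s)));
    }
}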
Example 2: CalculateGradientForMean
/// <summary>
/// Helper function to calculate gradient of the KL divergence with respect to the mean of the Dirichlet.
/// </summary>
/// <param name="meanPseudoCount">Pseudocount vector of the incoming message from 'mean'</param>
/// <param name="totalCount">Incoming message from 'totalCount'</param>
/// <param name="meanLogProb">E[log(prob)]</param>
/// <returns>Gradient of the KL divergence with respect to the mean of the Dirichlet</returns>
internal static Vector CalculateGradientForMean(Vector meanPseudoCount, Gamma totalCount, Vector meanLogProb)
{
    // Compute required integrals
    double[] EELogGamma;
    double[] EELogMLogGamma;
    double[] EELogOneMinusMLogGamma;
    MeanMessageExpectations(
        meanPseudoCount,
        totalCount,
        out EELogGamma,
        out EELogMLogGamma,
        out EELogOneMinusMLogGamma);
    // Calculate gradients of ELogGamma(sm)
    int K = meanPseudoCount.Count;
    double meanTotalCount = meanPseudoCount.Sum();
    Vector ELogM = Vector.Zero(K);
    Vector B = Vector.Zero(K);
    Vector A = Vector.Zero(K);
    ELogM.SetToFunction(meanPseudoCount, MMath.Digamma);
    ELogM.SetToDifference(ELogM, MMath.Digamma(meanTotalCount));
    for (int k = 0; k < K; k++) {
        A[k] = EELogMLogGamma[k] - ELogM[k] * EELogGamma[k];
        double ELogOneMinusM = MMath.Digamma(meanTotalCount - meanPseudoCount[k])
            - MMath.Digamma(meanTotalCount);
        B[k] = EELogOneMinusMLogGamma[k] - ELogOneMinusM * EELogGamma[k];
    }
    Vector gradC = A - B + B.Sum();
    // Calculate gradients of analytic part
    double sum = 0;
    for (int k = 0; k < K; k++)
        sum += meanPseudoCount[k] * meanLogProb[k];
    Vector gradS = Vector.Constant(K, -sum / (meanTotalCount * meanTotalCount));
    for (int k = 0; k < K; k++)
        gradS[k] += meanLogProb[k] / meanTotalCount;
    gradS *= totalCount.GetMean();
    gradS -= gradC;
    return gradS;
}
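Example 2 combines a quadrature part (gradC, built from the three expectation arrays returned by MeanMessageExpectations) with an analytic part that is scaled by E[s] = totalCount.GetMean(). The snippet below is only a hypothetical call site showing how the arguments line up; the numbers are made up, Vector.FromArray is assumed to exist in Infer.NET's math library, and because the method is internal the caller must live in the same class/assembly as CalculateGradientForMean.

static Vector GradientSketch()
{
    // Hypothetical inputs (sketch only): pseudocounts of the incoming Dirichlet 'mean' message,
    // a Gamma message for 'totalCount', and E[log(prob)] for each dimension.
    Vector meanPseudoCount = Vector.FromArray(2.0, 3.0, 5.0);
    Gamma totalCount = Gamma.FromShapeAndRate(3.0, 0.5);
    Vector meanLogProb = Vector.FromArray(-1.2, -0.9, -1.5);
    // Gradient of the KL divergence with respect to the Dirichlet mean.
    return CalculateGradientForMean(meanPseudoCount, totalCount, meanLogProb);
}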
Example 3: Q
public static Gamma Q(Gaussian sample, Gaussian mean, Gamma precision, Gamma q)
{
    if (precision.IsPointMass || sample.IsUniform() || mean.IsUniform()) return precision;
    double mx, vx;
    sample.GetMeanAndVariance(out mx, out vx);
    double mm, vm;
    mean.GetMeanAndVariance(out mm, out vm);
    double m = mx-mm;
    double v = vx+vm;
    if (double.IsInfinity(v)) return precision;
    double m2 = m*m;
    double[] dlogfss;
    double x = q.GetMean();
    double a = precision.Shape;
    double b = precision.Rate;
    if (double.IsPositiveInfinity(x)) x = (a + Math.Sqrt(a*a + 2*b/v))/(2*b);
    // TODO: check initialization
    if (q.IsUniform()) x = 0;
    //x = QReinitialize(sample, mean, precision, x);
    for (int iter = 0; iter < 1000; iter++) {
        double oldx = x;
        if (true) {
            // want to find x that optimizes -0.5*log(v + 1/x) - 0.5*m2/(v + 1/x) + a*log(x) - b*x
            // we fit a lower bound, then optimize the bound.
            // this is guaranteed to improve x.
            double logSlope = precision.Shape;
            double slope = -precision.Rate;
            double denom = v*x+1;
            // 1/(v+1/x) <= 1/(v+1/x0) + (x-x0)/(v*x0+1)^2
            slope += -0.5*m2/(denom*denom);
            if (v*x < 1) {
                // log(v+1/x) = log(v*x+1) - log(x)
                // log(v*x+1) <= log(v*x0+1) + (x-x0)*v/(v*x0+1)
                logSlope += 0.5;
                slope += -0.5*v/denom;
                x = -logSlope/slope;
                // at x=0 the update is x = (a+0.5)/(b + 0.5*(m2+v))
                // at x=inf the update is x = (a+0.5)/b
            } else {
                // if v*x > 1:
                // log(v+1/x) <= log(v+1/x0) + (1/x - 1/x0)/(v + 1/x0)
                double invSlope = -0.5*x/denom;
                // solve for the maximum of logslope*log(r)+slope*r+invslope./r
                // logslope/r + slope - invslope/r^2 = 0
                // logslope*r + slope*r^2 - invslope = 0
                //x = (-logSlope - Math.Sqrt(logSlope*logSlope + 4*invSlope*slope))/(2*slope);
                double c = 0.5*logSlope/slope;
                double d = invSlope/slope;
                // note c < 0 always
                x = Math.Sqrt(c*c + d) - c;
                // at x=inf, invSlope=-0.5/v so the update is x = (ax + sqrt(ax*ax + 2*b/v))/(2*b)
            }
            if (x < 0) throw new Exception("x < 0");
        } else {
            dlogfss = dlogfs(x, m, v);
            if (true) {
                x = (precision.Shape - x*x*dlogfss[1])/(precision.Rate - dlogfss[0] - x*dlogfss[1]);
            } else if (true) {
                double delta = dlogfss[0] - precision.Rate;
                x *= Math.Exp(-(delta*x + precision.Shape)/(delta*x + dlogfss[1]*x));
            } else {
                x = precision.Shape/(precision.Rate - dlogfss[0]);
            }
            if (x < 0) throw new Exception("x < 0");
        }
        if (double.IsNaN(x)) throw new Exception("x is nan");
        //Console.WriteLine("{0}: {1}", iter, x);
        if (MMath.AbsDiff(oldx, x, 1e-10) < 1e-10) {
            x = QReinitialize(sample, mean, precision, x);
            if (MMath.AbsDiff(oldx, x, 1e-10) < 1e-10) break;
        }
        if (iter == 1000-1) throw new Exception("not converging");
    }
    //x = r0;
    dlogfss = dlogfs(x, m, v);
    double dlogf = dlogfss[0];
    double ddlogf = dlogfss[1];
    a = precision.Shape - x*x*ddlogf;
    b = precision.Rate - (dlogf + x*ddlogf);
    if (a <= 0 || b <= 0) throw new Exception();
    if (double.IsNaN(a) || double.IsNaN(b)) throw new Exception("result is nan");
    return Gamma.FromShapeAndRate(a, b);
}
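The comments in the v*x < 1 branch of Example 3 explain the update: the non-conjugate terms of the objective are replaced by a linear lower bound that touches at the current point, leaving a surrogate of the form (a + 0.5)*log(x) + slope*x, which is maximised in closed form at x = -(a + 0.5)/slope. The standalone sketch below (plain System.Math only, not the library's implementation) reproduces that branch so the "guaranteed to improve x" claim can be checked numerically; variable names mirror the example but the values in Demo are made up.

using System;

public static class QBoundSketch
{
    // Objective maximised by Q(): -0.5*log(v + 1/x) - 0.5*m2/(v + 1/x) + a*log(x) - b*x.
    public static double Objective(double x, double m2, double v, double a, double b)
    {
        return -0.5 * Math.Log(v + 1 / x) - 0.5 * m2 / (v + 1 / x) + a * Math.Log(x) - b * x;
    }

    // One bound-and-optimize update, matching the v*x < 1 branch of Example 3:
    // 1/(v+1/x) and log(v*x+1) are linearised at x0, and the remaining
    // (a+0.5)*log(x) + slope*x surrogate is maximised at x = -(a+0.5)/slope.
    public static double Update(double x0, double m2, double v, double a, double b)
    {
        double denom = v * x0 + 1;
        double slope = -b - 0.5 * m2 / (denom * denom) - 0.5 * v / denom;
        return -(a + 0.5) / slope;
    }

    public static void Demo()
    {
        // Example 3 switches to a different bound once v*x >= 1; these values keep v*x < 1 throughout.
        double m2 = 2.0, v = 0.5, a = 3.0, b = 1.5, x = 0.1;
        for (int iter = 0; iter < 10; iter++)
        {
            double xNew = Update(x, m2, v, a, b);
            // The objective is non-decreasing under this update because the surrogate
            // is a lower bound that touches the objective at the current x.
            Console.WriteLine("x = {0:F6} -> {1:F6}, objective {2:F6} -> {3:F6}",
                x, xNew, Objective(x, m2, v, a, b), Objective(xNew, m2, v, a, b));
            x = xNew;
        }
    }
}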