This article collects typical usage examples of the C# Vector.Inner method: what Vector.Inner does, how it is called, and what real code that uses it looks like. You can also explore further examples of the Vector class that the method belongs to.
Five code examples of Vector.Inner are shown below, ordered by popularity.
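Before the examples, here is a minimal sketch of the basic contract of Vector.Inner: it returns the scalar dot product of two vectors of equal length. The using directive and Vector.FromArray are assumptions about the Infer.NET build in use; classic Infer.NET exposes Vector under MicrosoftResearch.Infer.Maths, while newer releases use Microsoft.ML.Probabilistic.Math.
using MicrosoftResearch.Infer.Maths;  // assumed namespace; adjust to your library version
class InnerSketch
{
    static void Main()
    {
        Vector a = Vector.FromArray(1.0, 2.0, 3.0);
        Vector b = Vector.FromArray(4.0, 5.0, 6.0);
        // Dot product: 1*4 + 2*5 + 3*6 = 32
        double dot = a.Inner(b);
        System.Console.WriteLine(dot);
    }
}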
Example 1: InnerProductAverageLogarithm
/// <summary>
/// VMP message to 'innerProduct'
/// </summary>
/// <param name="A">Constant value for 'a'.</param>
/// <param name="BMean">Buffer 'BMean'.</param>
/// <param name="BVariance">Buffer 'BVariance'.</param>
/// <returns>The outgoing VMP message to the 'innerProduct' argument</returns>
/// <remarks><para>
/// The outgoing message is the factor viewed as a function of 'innerProduct' conditioned on the given values.
/// </para></remarks>
public static Gaussian InnerProductAverageLogarithm(Vector A, Vector BMean, PositiveDefiniteMatrix BVariance)
{
    Gaussian result = new Gaussian();
    // Uses John Winn's rule for deterministic factors.
    // Strict variational inference would set the variance to 0.
    // p(x) = N(a' E[b], a' var(b) a)
    result.SetMeanAndVariance(A.Inner(BMean), BVariance.QuadraticForm(A));
    return result;
}
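A hedged usage sketch for this operator: with var(b) set to the identity, the outgoing Gaussian should have mean A.Inner(BMean) and variance A' A. The PositiveDefiniteMatrix.Identity helper and the unqualified call site are assumptions about the Infer.NET version in use.
// Sketch only; Identity and the call site are assumptions, not part of the example above.
Vector a = Vector.FromArray(1.0, 2.0);
Vector bMean = Vector.FromArray(0.5, -1.0);
PositiveDefiniteMatrix bVariance = PositiveDefiniteMatrix.Identity(2);  // var(b) = I
Gaussian msg = InnerProductAverageLogarithm(a, bMean, bVariance);
// Expected: mean = 1*0.5 + 2*(-1) = -1.5, variance = a' I a = 1 + 4 = 5
System.Console.WriteLine(msg);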
Example 2: EvaluateX
/// <summary>
/// Evaluates the kernel for a single vector (which is used for both slots)
/// </summary>
/// <param name="x">Vector</param>
/// <param name="xDeriv">Derivative of the kernel value with respect to x</param>
/// <param name="logThetaDeriv">Derivative of the kernel value with respect to the log hyper-parameters</param>
/// <returns>Kernel value evaluated at x</returns>
public override double EvaluateX(Vector x, ref Vector xDeriv, ref Vector logThetaDeriv)
{
    int numInputs = variances.Count;
    Vector dvec = Vector.Zero(numInputs);
    dvec.SetToProduct(variances, x);
    double result = x.Inner(dvec);
    if (((object)logThetaDeriv) != null)
    {
        logThetaDeriv.SetToProduct(x, dvec);
    }
    if (((object)xDeriv) != null)
    {
        xDeriv.SetToProduct(dvec, 2.0);
    }
    return result;
}
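To see what EvaluateX computes without constructing the kernel object (its variances field is internal state), the sketch below reproduces the same arithmetic with plain Vectors: the kernel value is the weighted squared norm sum_i variances[i] * x[i]^2, the log-hyper-parameter derivative is variances ⊙ x ⊙ x, and the x-derivative is 2 * (variances ⊙ x). Only Vector calls that already appear in the example are used, plus Vector.FromArray for the test data.
// Standalone sketch of the quantities EvaluateX produces.
Vector variances = Vector.FromArray(0.5, 2.0);
Vector x = Vector.FromArray(3.0, -1.0);
Vector dvec = Vector.Zero(x.Count);
dvec.SetToProduct(variances, x);       // dvec[i] = variances[i] * x[i]
double kernelValue = x.Inner(dvec);    // 0.5*9 + 2*1 = 6.5
Vector logThetaDeriv = Vector.Zero(x.Count);
logThetaDeriv.SetToProduct(x, dvec);   // variances[i] * x[i]^2
Vector xDeriv = Vector.Zero(x.Count);
xDeriv.SetToProduct(dvec, 2.0);        // 2 * variances[i] * x[i]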
Example 3: EvaluateX1X2
/// <summary>
/// Evaluates the kernel for a pair of vectors
/// </summary>
/// <param name="x1">First vector</param>
/// <param name="x2">Second vector</param>
/// <param name="x1Deriv">Derivative of the kernel value with respect to x1 input vector</param>
/// <param name="logThetaDeriv">Derivative of the kernel value with respect to the log hyper-parameters</param>
/// <returns></returns>
public override double EvaluateX1X2(Vector x1, Vector x2, ref Vector x1Deriv, ref Vector logThetaDeriv)
{
if (Object.ReferenceEquals(x1,x2))
{
return EvaluateX(x1, ref x1Deriv, ref logThetaDeriv);
}
else
{
int numInputs = variances.Count;
Vector dvec = Vector.Zero(numInputs);
dvec.SetToProduct(variances, x2);
double result = x1.Inner(dvec);
if (((object)logThetaDeriv) != null)
{
logThetaDeriv.SetToProduct(x1, dvec);
}
if (((object)x1Deriv) != null)
{
x1Deriv.SetTo(dvec);
}
return result;
}
}
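For two distinct inputs the same pattern yields the variance-weighted dot product x1' diag(variances) x2; a short sketch of that arithmetic, again using only the Vector calls seen above:
// Sketch: the value EvaluateX1X2 returns for distinct inputs.
Vector variances = Vector.FromArray(0.5, 2.0);
Vector x1 = Vector.FromArray(3.0, -1.0);
Vector x2 = Vector.FromArray(1.0, 4.0);
Vector dvec = Vector.Zero(x2.Count);
dvec.SetToProduct(variances, x2);   // diag(variances) * x2
double k = x1.Inner(dvec);          // 0.5*3*1 + 2*(-1)*4 = -6.5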
Example 4: TotalCountAverageLogarithm
/// <summary>
/// VMP message to 'totalCount'
/// </summary>
/// <param name="mean">Constant value for 'mean'.</param>
/// <param name="totalCount">Incoming message from 'totalCount'. Must be a proper distribution. If uniform, the result will be uniform.</param>
/// <param name="prob">Incoming message from 'prob'. Must be a proper distribution. If any element is uniform, the result will be uniform.</param>
/// <param name="to_totalCount">Previous outgoing message to 'totalCount'.</param>
/// <returns>The outgoing VMP message to the 'totalCount' argument</returns>
/// <remarks><para>
/// The outgoing message is the exponential of the average log-factor value, where the average is over all arguments except 'totalCount'.
/// The formula is <c>exp(sum_(prob) p(prob) log(factor(prob,mean,totalCount)))</c>.
/// </para>
/// <para>
/// The outgoing message here would not be Dirichlet distributed, so we use Nonconjugate VMP, which
/// sends an approximate factor chosen so that the gradient of the KL divergence with respect to the variational parameters matches.
/// </para></remarks>
/// <exception cref="ImproperMessageException"><paramref name="totalCount"/> is not a proper distribution</exception>
/// <exception cref="ImproperMessageException"><paramref name="prob"/> is not a proper distribution</exception>
public static Gamma TotalCountAverageLogarithm(Vector mean, [Proper] Gamma totalCount, [SkipIfUniform] Dirichlet prob, Gamma to_totalCount)
{
    double at = totalCount.Shape;
    double bt = totalCount.Rate;
    // Find required expectations using quadrature
    Vector gradElogGamma = GammaFromShapeAndRateOp.CalculateDerivatives(totalCount);
    Vector gradS = gradElogGamma;
    Gamma smk = new Gamma(totalCount);
    for (int k = 0; k < mean.Count; k++) {
        smk.Rate = totalCount.Rate / mean[k];
        gradS -= GammaFromShapeAndRateOp.CalculateDerivatives(smk);
    }
    // Analytic
    double c = mean.Inner(prob.GetMeanLog());
    gradS[0] += c / bt;
    gradS[1] -= c * at / (bt * bt);
    Matrix mat = new Matrix(2, 2);
    mat[0, 0] = MMath.Trigamma(at);
    mat[1, 0] = mat[0, 1] = -1 / bt;
    mat[1, 1] = at / (bt * bt);
    Vector v = GammaFromShapeAndRateOp.twoByTwoInverse(mat) * gradS;
    Gamma approximateFactor = Gamma.FromShapeAndRate(v[0] + 1, v[1]);
    if (damping == 0.0)
        return approximateFactor;
    else
        return (approximateFactor ^ (1 - damping)) * (to_totalCount ^ damping);
}
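The Vector.Inner call in this example is the analytic term c = sum_k mean[k] * E[log prob[k]]. The sketch below isolates just that computation; the Dirichlet constructor overload is an assumption about the distributions API of the Infer.NET build in use, while GetMeanLog appears in the example itself.
// Sketch of the analytic term c = mean' E[log prob]; constructor overload is an assumption.
Vector mean = Vector.FromArray(0.2, 0.3, 0.5);
Dirichlet prob = new Dirichlet(Vector.FromArray(2.0, 3.0, 5.0));
double c = mean.Inner(prob.GetMeanLog());   // sum_k mean[k] * E[log prob[k]]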
Example 5: AAverageLogarithm
public static VectorGaussian AAverageLogarithm(double innerProduct, Vector B, VectorGaussian result)
{
    // This case could be supported if we had low-rank VectorGaussian distributions.
    throw new NotSupportedException(InnerProductOp.NotSupportedMessage);
    if (result == default(VectorGaussian)) result = new VectorGaussian(B.Count);
    result.Point = result.Point;
    result.Point.SetToProduct(B, innerProduct / B.Inner(B));
    return result;
}
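The code after the throw is unreachable, but it sketches the intended point-mass update: scale B by innerProduct / B.Inner(B), which gives the minimum-norm vector a satisfying a.Inner(B) == innerProduct. A small standalone illustration of that projection, with hypothetical values:
// Sketch of the projection the unreachable code intends: a = (innerProduct / |B|^2) * B.
Vector B = Vector.FromArray(1.0, 2.0);
double innerProduct = 10.0;
Vector a = Vector.Zero(B.Count);
a.SetToProduct(B, innerProduct / B.Inner(B));   // B.Inner(B) = 5, so a = 2 * B = (2, 4)
// Check: a.Inner(B) = 2*1 + 4*2 = 10 = innerProduct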