本文整理汇总了Java中org.apache.commons.math.special.Gamma.digamma方法的典型用法代码示例。如果您正苦于以下问题:Java Gamma.digamma方法的具体用法?Java Gamma.digamma怎么用?Java Gamma.digamma使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.commons.math.special.Gamma
的用法示例。
在下文中一共展示了Gamma.digamma方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: dirichletExpectation
import org.apache.commons.math.special.Gamma; // import the package/class this method depends on
/**
 * Computes the Dirichlet expectation E[log(theta)] for each element of the
 * given variational parameter matrix:
 *
 *   approx[k][w] = psi(array[k][w]) - psi(sum_w array[k][w])
 *
 * where psi is the digamma function (the first derivative of the logarithm
 * of the gamma function).
 *
 * @param array the variational parameter matrix; rows must be non-empty and
 *              of equal length
 * @return a matrix of the same shape holding the per-element expectations
 */
static double[][] dirichletExpectation(double[][] array) {
    int numRows = array.length;
    int numCols = array[0].length;
    // Digamma of each row sum. Java zero-initialises double arrays, so no
    // explicit fill is needed; the original try/catch that re-wrapped an
    // (already unchecked) array exception in a RuntimeException is removed —
    // it only obscured the real failure.
    double[] rowDigamma = new double[numRows];
    for (int k = 0; k < numRows; ++k) {
        double rowSum = 0.0;
        for (int w = 0; w < numCols; ++w) {
            rowSum += array[k][w];
        }
        rowDigamma[k] = Gamma.digamma(rowSum);
    }
    double[][] approx = new double[numRows][numCols];
    for (int k = 0; k < numRows; ++k) {
        for (int w = 0; w < numCols; ++w) {
            approx[k][w] = Gamma.digamma(array[k][w]) - rowDigamma[k];
        }
    }
    return approx;
}
示例2: performE
import org.apache.commons.math.special.Gamma; // import the package/class this method depends on
/**
 * Performs the variational E-step of LDA for a single document: iterates
 * updates of the per-word topic distribution (phi, held in log space during
 * the update) and the per-topic Dirichlet parameters (varGamma) until the
 * variational state converges (Blei et al. 2003, eqns 16 and 17).
 *
 * @param doc    the document being processed
 * @param vstate the variational state, (re)prepared for this document
 * @return the converged variational state
 */
private LDAVariationlState performE(Document doc, LDAVariationlState vstate) {
    vstate.prepare(doc);
    while (!variationalStateConverged(vstate)) {
        int docWordIndex = 0;
        for (final Entry wordCount : doc.getVector().entries()) {
            double phiSum = 0;
            final int word = wordCount.index;
            final int count = wordCount.value;
            for (int topicIndex = 0; topicIndex < vstate.phi.length; topicIndex++) {
                vstate.oldphi[topicIndex] = vstate.phi[docWordIndex][topicIndex];
                // If this word has been seen in this topic before.
                // BUG FIX: the guard previously tested
                // topicWord[topicIndex][docWordIndex], but the value logged
                // below is topicWord[topicIndex][word]; the guard must test
                // the same element, otherwise Math.log(0) = -Infinity can
                // slip through whenever docWordIndex != word.
                if (vstate.state.topicWord[topicIndex][word] > 0) {
                    // Update phi.
                    // Remember this phi is actually the same value for
                    // every instance of this particular word.
                    // Whenever phi is actually used there is likely to be a
                    // multiplication by the number of times this particular
                    // word appears in this document.
                    // From eqn 16 in Blei 2003: the sum-gamma digamma term
                    // cancels when the exact phi for a given word is
                    // normalised below.
                    final double logBeta =
                            Math.log(vstate.state.topicWord[topicIndex][word]) -
                            Math.log(vstate.state.topicTotal[topicIndex]);
                    vstate.phi[docWordIndex][topicIndex] =
                            logBeta +
                            Gamma.digamma(vstate.varGamma[topicIndex]);
                } else {
                    // If not, beta_wi = ETA (very small), so log beta_wi
                    // ~= -100 (ETA = 10^-34).
                    vstate.phi[docWordIndex][topicIndex] =
                            Gamma.digamma(vstate.varGamma[topicIndex]) - 100;
                }
                if (topicIndex == 0) {
                    phiSum = vstate.phi[docWordIndex][topicIndex];
                } else {
                    // We need phiSum = Sum_K_i{phi}, i.e. log phiSum =
                    // log Sum_K_i{phi}, but what we hold is log phi — so we
                    // must compute log(a + b) from log(a) and log(b).
                    // This is the normaliser for eqn 16.
                    phiSum = MathUtils.logSum(phiSum,
                            vstate.phi[docWordIndex][topicIndex]);
                }
            }
            for (int topicIndex = 0; topicIndex < vstate.phi.length; topicIndex++) {
                // Replace log phi with the normalised (linear-space) phi:
                // normalise a given word's phi summing over all i in eqn 16.
                vstate.phi[docWordIndex][topicIndex] = Math.exp(
                        vstate.phi[docWordIndex][topicIndex] - phiSum
                );
                // Update gamma incrementally (eqn 17, Blei 2003):
                // - take away the old phi,
                // - add the new phi,
                // - scaled by count, the number of times this particular
                //   word appears in this document.
                vstate.varGamma[topicIndex] += count
                        * (vstate.phi[docWordIndex][topicIndex] - vstate.oldphi[topicIndex]);
            }
            docWordIndex++;
        }
        vstate.oldLikelihood = vstate.likelihood;
        vstate.likelihood = computeLikelihood(doc, vstate);
        vstate.iteration++;
    }
    return vstate;
}
示例3: computeLikelihood
import org.apache.commons.math.special.Gamma; // import the package/class this method depends on
/**
 * Calculates a lower bound for the log likelihood of a document given the
 * current parameters. When this is maximised it minimises the KL divergence
 * between the variational posterior and the true posterior.
 *
 * The derivation can be seen in the appendix of Blei's LDA paper (2003),
 * eqn (15).
 *
 * @param doc the document
 * @param vstate the variational state for this document
 * @return the lower bound on the document's log likelihood
 */
public double computeLikelihood(Document doc, LDAVariationlState vstate) {
double likelihood = 0;
// Accumulate the sum of the gamma parameters and the sum of their digammas.
double sumVarGamma = 0;
double sumDiGamma = 0;
for (int topicIndex = 0; topicIndex < ntopics; topicIndex++) {
sumVarGamma += vstate.varGamma[topicIndex];
vstate.digamma[topicIndex] = Gamma
.digamma(vstate.varGamma[topicIndex]);
sumDiGamma += vstate.digamma[topicIndex];
}
// First sum the terms which don't rely on iteration through the topics
// or through the document's words.
// NOTE(review): Blei 2003 eqn (15) line 4 contributes -log Gamma(sum_j gamma_j),
// but here logGamma(sumVarGamma) is ADDED — confirm the intended sign.
likelihood += Gamma.logGamma(vstate.state.alpha * ntopics) - // eqn (15) line 1
Gamma.logGamma(vstate.state.alpha) * ntopics + // eqn (15) line 1
Gamma.logGamma(sumVarGamma); // eqn (15) line 4
for (int topicIndex = 0; topicIndex < ntopics; topicIndex++) {
// Now add the terms that just need an iteration over k.
// NOTE(review): eqn (15) uses Psi(gamma_i) - Psi(sum_j gamma_j), whereas
// sumDiGamma here is the SUM of digammas, not the digamma of the sum —
// confirm against the paper.
final double topicGammaDiff = vstate.digamma[topicIndex] - sumDiGamma;
// eqn (15) line 4: log Gamma(gamma_i) - (gamma_i - 1) * topicGammaDiff
likelihood += Gamma.logGamma(vstate.varGamma[topicIndex]) - (vstate.varGamma[topicIndex] - 1)
* topicGammaDiff;
int wordIndex = 0;
for (final Entry wordCount : doc.getVector().entries()) {
final int word = wordCount.index;
final int count = wordCount.value;
// log beta_{k,w} = log(topicWord / topicTotal), computed in log space
// to avoid underflow.
final double logBeta = Math.log(
vstate.state.topicWord[topicIndex][word]) -
Math.log(vstate.state.topicTotal[topicIndex]
);
likelihood +=
// Count because these sums are over N and
// the sum of the counts of each unique word is == N.
count * (
// Each of these lines happens to multiply by
// the current word's phi.
vstate.phi[wordIndex][topicIndex] * (
// eqn (15) line 2
topicGammaDiff +
// eqn (15) line 3
// NOTE(review): this inner factor of count, combined with the
// outer count, yields count^2 * phi * logBeta — eqn (15) line 3
// appears to need count * phi * logBeta only; confirm.
count * logBeta -
// eqn (15) line 5: entropy term -phi * log(phi)
Math.log(vstate.phi[wordIndex][topicIndex]
)
)
);
wordIndex++;
}
}
return likelihood;
}