This article collects typical usage examples of the Java method org.apache.commons.math.special.Gamma.logGamma. If you are wondering what Gamma.logGamma does, how to use it, or where to find examples of it, the curated code examples below may help. You can also read more about the enclosing class org.apache.commons.math.special.Gamma
and its usage examples.
The following shows 15 code examples of Gamma.logGamma, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
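Before the individual examples, here is a minimal standalone sketch (an addition to this listing, assuming commons-math 2.x with org.apache.commons.math.special.Gamma on the classpath) showing what logGamma computes: the natural logarithm of the Gamma function, so for a positive integer n, Gamma.logGamma(n + 1) equals ln(n!).

import org.apache.commons.math.special.Gamma;

public class LogGammaDemo {
    public static void main(String[] args) {
        // Gamma.logGamma(x) returns ln(Gamma(x)); Gamma(5) = 4! = 24,
        // so both printed values should be about 3.178.
        System.out.println("logGamma(5.0) = " + Gamma.logGamma(5.0));
        System.out.println("ln(24)        = " + Math.log(24.0));
    }
}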
Example 1: partitionGet
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
@Override
public PartitionGetResult partitionGet(PartitionGetParam partParam) {
PartitionKey pkey = partParam.getPartKey();
pkey = psContext.getMatrixMetaManager().getMatrixMeta(pkey.getMatrixId())
.getPartitionMeta(pkey.getPartitionId()).getPartitionKey();
int ws = pkey.getStartRow();
int es = pkey.getEndRow();
LikelihoodParam.LikelihoodPartParam param = (LikelihoodParam.LikelihoodPartParam) partParam;
float beta = param.getBeta();
double lgammaBeta = Gamma.logGamma(beta);
double ll = 0;
for (int w = ws; w < es; w ++) {
ServerRow row = psContext.getMatrixStorageManager().getRow(pkey, w);
ll += likelihood(row, beta, lgammaBeta);
}
return new ScalarPartitionAggrResult(ll);
}
Example 2: Sampler
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
public Sampler(CSRTokens data, LDAModel model) {
this.data = data;
this.model = model;
K = model.K();
alpha = model.alpha();
beta = model.beta();
vbeta = data.n_words * beta;
lgammaBeta = Gamma.logGamma(beta);
lgammaAlpha = Gamma.logGamma(alpha);
lgammaAlphaSum = Gamma.logGamma(alpha * K);
nk = new int[K];
wk = new int[K];
tidx = new short[K];
psum = new float[K];
tree = new FTree(K);
}
Example 3: functionLogZ
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
protected double functionLogZ(double[] vector){
double valueFunction;
double sum=0;
double logpart1=0;
// log(prod(X))=log(X1)+log(X2)+.........+log(Xn)
for(int i=0;i<vector.length;i++){
logpart1=logpart1+Gamma.logGamma(vector[i]);
sum=sum+vector[i];
}
double logpart2 = Gamma.logGamma(sum);
valueFunction = logpart1-logpart2;
return valueFunction;
}
Example 4: likelihood
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
private double likelihood(ServerRow row, float beta, double lgammaBeta) {
int len = (int)(row.getEndCol() - row.getStartCol());
double ll = 0;
if (row instanceof ServerDenseIntRow) {
IntBuffer buf = ((ServerDenseIntRow) row).getData();
for (int i = 0; i < len; i ++) {
if (buf.get(i) > 0)
ll += Gamma.logGamma(buf.get(i) + beta) - lgammaBeta;
}
} else
throw new AngelException("should be ServerDenseIntRow");
return ll;
}
Example 5: NonCentralChiSquaredDistribution
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
/**
* @param degrees The number of degrees of freedom; must be strictly positive
* @param nonCentrality The non-centrality parameter; must not be negative
*/
public NonCentralChiSquaredDistribution(final double degrees, final double nonCentrality) {
Validate.isTrue(degrees > 0, "degrees of freedom must be > 0, have " + degrees);
Validate.isTrue(nonCentrality >= 0, "non-centrality must be >= 0, have " + nonCentrality);
_dofOverTwo = degrees / 2.0;
_lambdaOverTwo = nonCentrality / 2.0;
_k = (int) Math.round(_lambdaOverTwo);
if (_lambdaOverTwo == 0) {
_pStart = 0.0;
} else {
final double logP = -_lambdaOverTwo + _k * Math.log(_lambdaOverTwo) - Gamma.logGamma(_k + 1);
_pStart = Math.exp(logP);
}
}
Example 6: gammaLn
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
public static Matrix gammaLn(Matrix m){
int nc = m.data.length;
int nr = m.data[0].length;
double[][] result = new double[nc][];
for (int k = 0; k < nc; ++k) {
result[k] = new double[nr];
for (int w = 0; w < nr; ++w) {
result[k][w] = Gamma.logGamma(m.data[k][w]);
}
}
return new Matrix(result);
}
Example 7: calculateLogGammaAlphaValues
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
public void calculateLogGammaAlphaValues() {
m_LogGammaAlpha = new double[m_AlphaValues.length][m_AlphaValues[0].length];
for (int j=0;j<m_LogGammaAlpha.length;j++) {
for (int i=0;i<m_LogGammaAlpha[0].length;i++) {
m_LogGammaAlpha[j][i] = Gamma.logGamma(m_AlphaValues[j][i]);
}
}
}
Example 8: evaluate
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
/**
* @param x The argument of the function, must be greater than zero
* @return The value of the function
*/
@Override
public Double evaluate(final Double x) {
Validate.isTrue(x > 0, "x must be greater than zero");
return Gamma.logGamma(x);
}
Example 9: getCDF
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
/**
* {@inheritDoc}
*/
@Override
public double getCDF(final Double x) {
Validate.notNull(x, "x");
if (x < 0) {
return 0.0;
}
if ((_dofOverTwo + _lambdaOverTwo) > 1000) {
return getFraserApproxCDF(x);
}
double regGammaStart = 0;
final double halfX = x / 2.0;
final double logX = Math.log(halfX);
try {
regGammaStart = Gamma.regularizedGammaP(_dofOverTwo + _k, halfX);
} catch (final org.apache.commons.math.MathException ex) {
throw new MathException(ex);
}
double sum = _pStart * regGammaStart;
double oldSum = Double.NEGATIVE_INFINITY;
double p = _pStart;
double regGamma = regGammaStart;
double temp;
int i = _k;
// first add terms below _k
while (i > 0 && Math.abs(sum - oldSum) / sum > _eps) {
i--;
p *= (i + 1) / _lambdaOverTwo;
temp = (_dofOverTwo + i) * logX - halfX - Gamma.logGamma(_dofOverTwo + i + 1);
regGamma += Math.exp(temp);
oldSum = sum;
sum += p * regGamma;
}
p = _pStart;
regGamma = regGammaStart;
oldSum = Double.NEGATIVE_INFINITY;
i = _k;
while (Math.abs(sum - oldSum) / sum > _eps) {
i++;
p *= _lambdaOverTwo / i;
temp = (_dofOverTwo + i - 1) * logX - halfX - Gamma.logGamma(_dofOverTwo + i);
regGamma -= Math.exp(temp);
oldSum = sum;
sum += p * regGamma;
}
return sum;
}
Example 10: recomputeZ
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
private void recomputeZ() {
if (Double.isNaN(z)) {
z = Gamma.logGamma(alpha) + Gamma.logGamma(beta) - Gamma.logGamma(alpha + beta);
}
}
Example 11: computeLikelihood
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
/**
* Calculates a lower bound for the log likelihood of a document given the
* current parameters. When this is maximised it minimises the KL divergence
* between the variational posterior and the true posterior.
*
* The derivation can be seen in the appendix of Blei's 2003 LDA paper.
*
* @param doc
* @param vstate
* @return the likelihood
*/
public double computeLikelihood(Document doc, LDAVariationlState vstate) {
double likelihood = 0;
// Prepare some variables we need
double sumVarGamma = 0;
double sumDiGamma = 0;
for (int topicIndex = 0; topicIndex < ntopics; topicIndex++) {
sumVarGamma += vstate.varGamma[topicIndex];
vstate.digamma[topicIndex] = Gamma
.digamma(vstate.varGamma[topicIndex]);
sumDiGamma += vstate.digamma[topicIndex];
}
// first we sum the parameters which don't rely on iteration through the
// classes or
// iteration through the documents
likelihood += Gamma.logGamma(vstate.state.alpha * ntopics) - // eqn (15) line 1
        Gamma.logGamma(vstate.state.alpha) * ntopics + // eqn (15) line 1
        Gamma.logGamma(sumVarGamma); // eqn (15) line 4
for (int topicIndex = 0; topicIndex < ntopics; topicIndex++) {
// Now add the things that just need an iteration over k
// eqn (15) line 4
final double topicGammaDiff = vstate.digamma[topicIndex] - sumDiGamma;
likelihood += Gamma.logGamma(vstate.varGamma[topicIndex]) - (vstate.varGamma[topicIndex] - 1)
* topicGammaDiff;
int wordIndex = 0;
for (final Entry wordCount : doc.getVector().entries()) {
final int word = wordCount.index;
final int count = wordCount.value;
final double logBeta = Math.log(
vstate.state.topicWord[topicIndex][word]) -
Math.log(vstate.state.topicTotal[topicIndex]
);
likelihood +=
// Count because these sums are over N and
// the sum of the counts of each unique word is == N
count * (
// Each of these lines happens to multiply by
// the current word's phi
vstate.phi[wordIndex][topicIndex] * (
// eqn (15) line 2
topicGammaDiff +
// eqn (15) line 3
count * logBeta -
// eqn (15) line 5
Math.log(vstate.phi[wordIndex][topicIndex]
)
)
);
wordIndex++;
}
}
return likelihood;
}
Example 12: lgamma
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
public static double lgamma(double x) {
return Gamma.logGamma(x);
}
Example 13: recomputeZ
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
/**
* Recompute the normalization factor.
*/
private void recomputeZ() {
if (Double.isNaN(z)) {
z = Gamma.logGamma(alpha) + Gamma.logGamma(beta) - Gamma.logGamma(alpha + beta);
}
}
Example 14: functionZAlternative
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
protected double functionZAlternative(double[] vector){
double valueFunction=1;
double sum=0;
double logpart1=0;
// log(prod(X))=log(X1)+log(X2)+.........+log(Xn)
for(int i=0;i<vector.length;i++){
logpart1=logpart1+Gamma.logGamma(vector[i]);
sum=sum+vector[i];
}
double logpart2 = Gamma.logGamma(sum);
valueFunction = Math.exp(logpart1-logpart2);
return valueFunction;
}
Example 15: getEncodingCostValueWithNormalizer
import org.apache.commons.math.special.Gamma; // import the package/class the method depends on
public double getEncodingCostValueWithNormalizer(){
int nbAttr = attributes.length;
int nbSubsets = data.size();
double encodingCostValue= nbSubsets*(Math.log(nbSubsets)/Math.log(2));
// iterating over all columns
for(int c=0;c<nbAttr;c++){
// we are going to produce a matrix with the frequency of occurrence of each amino acid
// the columns are going to be the amino acids
// the lines are going to be the subsets
int[][] frequency = calculateFrequency(c,nbSubsets);
//printMatrix(frequency);
// iterating over all subsets
for(int s=0;s<nbSubsets;s++){
double logProb;
if (m_LogPMatrix[m_ClusterIds.get(s).intValue()][c] != 0.0) {
logProb = m_LogPMatrix[m_ClusterIds.get(s).intValue()][c];
}
else {
// calculate probability
double[] logP = new double[m_MixtureValues.length];
double normalizer=Double.NEGATIVE_INFINITY;
for(int j=0;j<m_MixtureValues.length;j++){
double tmp=0;
for (int i=1;i<m_AlphaValues[0].length;i++) {
if (frequency[s][i]>0) {
tmp += (Gamma.logGamma(m_AlphaValues[j][i]+frequency[s][i]) - m_LogGammaAlpha[j][i]);
}
}
double[] sumVectors = addAlphaVectorAndFrequencyvector(m_AlphaValues[j], frequency[s]);
double sum=0;
for (int x=0;x<sumVectors.length;x++) {
sum+=sumVectors[x];
}
double tmp2 = m_LogGammaAlpha[j][0] - Gamma.logGamma(sum);
logP[j] = Math.log(m_MixtureValues[j])+tmp+tmp2;
// Calculating normalizer
if(logP[j] > normalizer){
normalizer=logP[j];
}
}
double sumAllPJ=0;
for(int j=0;j<m_MixtureValues.length;j++){
double convertingBackValue = Math.exp(logP[j]-normalizer);
sumAllPJ = sumAllPJ+convertingBackValue;
}
// log base 2
//logProb = (Math.log(sumAllPJ)/Math.log(2))+normalizer;
// natural log (Math.log uses base e)
logProb = Math.log(sumAllPJ)+normalizer;
m_LogPMatrix[m_ClusterIds.get(s).intValue()][c] = logProb;
}
encodingCostValue = encodingCostValue - logProb;
}
}
return encodingCostValue;
}