This page collects typical usage examples of the Java class org.apache.commons.math.distribution.NormalDistribution. If you have been wondering what the NormalDistribution class is for, how to use it, or where to find working examples of it, the curated snippets below should help.
The NormalDistribution class belongs to the org.apache.commons.math.distribution package. Five code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
Example 1: calculateNormalDistributionScore
import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistribution; // import the required package/class
import org.apache.commons.math.distribution.NormalDistributionImpl;
import org.apache.commons.math.stat.descriptive.moment.Mean;

public static double calculateNormalDistributionScore(double[] values) {
    Mean mean = new Mean();
    double meanValue = mean.evaluate(values);
    double stdDev = calculateStandardDeviation(values);
    if (stdDev < 0.000001) {
        // A (near-)zero standard deviation means the values are effectively constant.
        return 1;
    } else {
        NormalDistribution distribution = new NormalDistributionImpl(meanValue, stdDev);
        try {
            // Calculate the probability that a value falls within +-1 standard
            // deviation of the mean. This would normally be computed over a fixed
            // interval, but since that interval changes per test, using a constant
            // width of one standard deviation yields a normalised value.
            return distribution.cumulativeProbability(meanValue - stdDev, meanValue + stdDev);
        } catch (MathException e) {
            return 0;
        }
    }
}
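Note that for any normal distribution the probability mass within one standard deviation of the mean is the constant 2Φ(1) − 1 ≈ 0.6827, so for non-constant inputs this method effectively returns that value. Below is a minimal sketch of how it might be called, assuming the method above is pasted into the same class; the calculateStandardDeviation stand-in and the sample data are illustrative, not part of the original source:

import org.apache.commons.math.stat.descriptive.moment.StandardDeviation;

public class NormalScoreDemo {
    // Hypothetical stand-in for the helper not shown in Example 1.
    private static double calculateStandardDeviation(double[] values) {
        return new StandardDeviation().evaluate(values);
    }

    public static void main(String[] args) {
        double[] samples = {12.1, 11.8, 12.4, 12.0, 11.9}; // illustrative data
        // Prints roughly 0.6827 for any non-constant input.
        System.out.println(calculateNormalDistributionScore(samples));
    }
}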
Example 2: update
import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistribution; // import the required package/class
import org.apache.commons.math.distribution.NormalDistributionImpl;
// stdevInput, meanRateInput, branchRatesInput and the rate arrays are fields of the enclosing class.

private void update() {
    if (useLogNormal && (binRatesNeedsUpdate || noCache)) {
        // Set the mean in real space to 1: for log-rates distributed N(mean, stdev^2),
        // the real-space mean is exp(mean + stdev^2 / 2), so mean = -stdev^2 / 2.
        currentLogNormalStdev = stdevInput.get().getValue();
        final double newMean = -(0.5 * currentLogNormalStdev * currentLogNormalStdev);
        final NormalDistribution normalDistr = new NormalDistributionImpl(newMean, currentLogNormalStdev);
        try {
            // Discretise the lognormal into nBins rates, one per bin midpoint quantile.
            for (int i = 0; i < nBins; i++) {
                binRates[i] = Math.exp(normalDistr.inverseCumulativeProbability((i + 0.5) / nBins));
            }
        } catch (MathException e) {
            throw new RuntimeException("Failed to compute inverse cumulative probability!");
        }
    }
    Double estimatedMean;
    final RealParameter estimatedMeanParameter = meanRateInput.get();
    if (estimatedMeanParameter == null) {
        estimatedMean = 1.0;
    } else {
        estimatedMean = estimatedMeanParameter.getValue();
    }
    final Integer[] branchRatePointers = branchRatesInput.get().getValues();
    for (int i = 0; i < nEstimatedRates; i++) {
        int b = branchRatePointers[i];
        ratesArray[i] = binRates[b] * estimatedMean;
    }
    if (!estimateRoot) ratesArray[rootNodeNumber] = estimatedMean;
    /* StringBuffer x = new StringBuffer();
    x.append(treeInput.get().getID());
    for (int i = 0; i < ratesArray.length; i++) {
        x.append(" ");
        x.append(ratesArray[i]);
    }
    System.out.println(x); */
}
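The essential step here, independent of the surrounding Input/RealParameter plumbing, is discretising a lognormal distribution with real-space mean 1 into equal-probability bins: if the log-rates are N(m, s²), the real-space mean is exp(m + s²/2), so choosing m = −s²/2 pins the mean at 1. A self-contained sketch of just that step (the stdev and bin count are illustrative):

import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistributionImpl;

public class LogNormalBins {
    public static void main(String[] args) throws MathException {
        double stdev = 0.5; // stdev of the log-rates (illustrative)
        int nBins = 10;
        double mean = -0.5 * stdev * stdev; // so that exp(mean + stdev^2/2) == 1
        NormalDistributionImpl normal = new NormalDistributionImpl(mean, stdev);
        double sum = 0;
        for (int i = 0; i < nBins; i++) {
            // One rate per bin, taken at the bin's midpoint quantile.
            double rate = Math.exp(normal.inverseCumulativeProbability((i + 0.5) / nBins));
            sum += rate;
        }
        System.out.println("mean bin rate = " + (sum / nBins)); // close to 1
    }
}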
Example 3: binconf
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistribution; // import the required package/class
import org.apache.commons.math.distribution.NormalDistributionImpl;
import org.apache.pig.data.Tuple; // this method comes from an Apache Pig UDF; tupleFactory and alpha are fields of the enclosing class

/**
 * @param x the number of positive (success) outcomes
 * @param n the number of observations
 * @return the (lower, upper) confidence interval
 * @throws IOException if the underlying math computation fails
 */
public Tuple binconf(Long x, Long n) throws IOException {
    NormalDistribution normalDist = new NormalDistributionImpl();
    if (x == null || n == null)
        return null;
    if (x < 0 || n < 0)
        throw new IllegalArgumentException("non-negative values expected");
    if (x > n)
        throw new IllegalArgumentException("invariant violation: number of successes > number of obs");
    if (n == 0)
        return tupleFactory.newTuple(Arrays.asList(Double.valueOf(0), Double.valueOf(0)));
    try {
        // z critical value for a two-sided (1 - alpha) confidence level
        double zcrit = -1.0 * normalDist.inverseCumulativeProbability(alpha / 2);
        double z2 = zcrit * zcrit;
        double p = x / (double) n;
        // Wilson score interval: (a - b)/c is the lower bound, (a + b)/c the upper.
        double a = p + z2 / 2 / n;
        double b = zcrit * Math.sqrt((p * (1 - p) + z2 / 4 / n) / n);
        double c = (1 + z2 / n);
        double lower = (a - b) / c;
        double upper = (a + b) / c;
        // Corrections for when x is very close to 0 or n; these improve the estimates.
        // For more on the Wilson binomial confidence interval, see:
        // L.D. Brown, T.T. Cai and A. DasGupta, "Interval Estimation for a Binomial
        // Proportion" (with discussion), Statistical Science, 16:101-133, 2001.
        // http://www-stat.wharton.upenn.edu/~tcai/paper/Binomial-StatSci.pdf
        if (x == 1)
            lower = -Math.log(1 - alpha) / n;
        if (x == (n - 1))
            upper = 1 + Math.log(1 - alpha) / n;
        return tupleFactory.newTuple(Arrays.asList(lower, upper));
    } catch (MathException e) {
        throw new IOException("math error", e);
    }
}
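To see the Wilson formula in action without the Pig UDF plumbing, here is a minimal standalone sketch (the alpha, x and n values are illustrative, and the x-near-0/n corrections are omitted):

import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistributionImpl;

public class WilsonDemo {
    public static void main(String[] args) throws MathException {
        double alpha = 0.05; // 95% confidence
        long x = 8, n = 10;  // 8 successes in 10 trials
        double zcrit = -new NormalDistributionImpl().inverseCumulativeProbability(alpha / 2);
        double z2 = zcrit * zcrit;
        double p = x / (double) n;
        double a = p + z2 / 2 / n;
        double b = zcrit * Math.sqrt((p * (1 - p) + z2 / 4 / n) / n);
        double c = 1 + z2 / n;
        // Prints approximately [0.4902, 0.9433] for these inputs.
        System.out.printf("Wilson 95%% CI: [%.4f, %.4f]%n", (a - b) / c, (a + b) / c);
    }
}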
Example 4: normalizeVertexScore
import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistribution; // import the required package/class
import org.apache.commons.math.distribution.NormalDistributionImpl;
// Vertex, Text, DoubleWritable and NullWritable come from Apache Giraph/Hadoop;
// superStep, maxSteps, logAvg, stdev and scale are fields of the enclosing computation.

/**
 * Normalize the vertex scores.
 * Uses log scale and the cumulative probability distribution.
 * @param vertex current vertex
 */
private void normalizeVertexScore(
        Vertex<Text, DoubleWritable, NullWritable> vertex) {
    double logValueDouble = Math.log(vertex.getValue().get());
    if (superStep == maxSteps - 4) {
        // Calculate LOG(value) and aggregate it to SUM_OF_LOGS.
        DoubleWritable logValue = new DoubleWritable(logValueDouble);
        aggregate(TrustRankComputation.SUM_OF_LOGS, logValue);
    } else if (superStep == maxSteps - 2) {
        /* Skip one superstep so the WorkerContext can finish aggregating
         * SUM_OF_LOGS. In this step, read AVG_OF_LOGS (calculated by the
         * WorkerContext), compute this vertex's squared deviation from it,
         * and aggregate that to SUM_OF_DEVS. The WorkerContext uses
         * SUM_OF_DEVS to calculate the standard deviation in step maxSteps - 1.
         */
        double squaredDeviation = Math.pow(logValueDouble - logAvg.get(), 2);
        DoubleWritable devWritable = new DoubleWritable(squaredDeviation);
        aggregate(TrustRankComputation.SUM_OF_DEVS, devWritable);
    } else if (superStep == maxSteps) {
        /* Step maxSteps - 1 was skipped so the WorkerContext could compute
         * the standard deviation. Use it together with AVG_OF_LOGS to build
         * a normal distribution, evaluate the CDF at this vertex's log value,
         * scale it, and store the result as the new score.
         */
        double newValue = 1.0d;
        double stdevValue = stdev.get();
        if (stdevValue == 0.0d) {
            stdevValue = 1e-10; // guard against a degenerate distribution
        }
        NormalDistribution dist = new NormalDistributionImpl(
            logAvg.get(), stdevValue);
        try {
            double cdf = dist.cumulativeProbability(logValueDouble);
            newValue = cdf * scale;
        } catch (MathException e) {
            e.printStackTrace();
        }
        vertex.setValue(new DoubleWritable(newValue));
    }
}
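The normalisation itself is easy to try in isolation: map a score to log space, take its percentile under a normal distribution fitted to the log values, then rescale. A minimal sketch, assuming illustrative values for the log-mean, log-stdev and scale (the real code obtains these from Giraph aggregators):

import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistributionImpl;

public class LogCdfNormalize {
    public static void main(String[] args) throws MathException {
        double logAvg = -2.0, logStdev = 0.7, scale = 10.0; // illustrative
        NormalDistributionImpl dist = new NormalDistributionImpl(logAvg, logStdev);
        for (double score : new double[] {0.05, 0.135, 0.4}) {
            // Percentile of log(score) under N(logAvg, logStdev^2), rescaled to [0, scale].
            double normalized = dist.cumulativeProbability(Math.log(score)) * scale;
            System.out.printf("%.3f -> %.3f%n", score, normalized);
        }
    }
}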
Example 5: normalizeVertexScore
Aside from aggregating into LinkRankComputation rather than TrustRankComputation, this example is the same normalisation as Example 4.

import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistribution; // import the required package/class
import org.apache.commons.math.distribution.NormalDistributionImpl;

/**
 * Normalize the vertex scores.
 * Uses log scale and the cumulative probability distribution.
 * @param vertex current vertex
 */
private void normalizeVertexScore(
        Vertex<Text, DoubleWritable, NullWritable> vertex) {
    double logValueDouble = Math.log(vertex.getValue().get());
    if (superStep == maxSteps - 4) {
        // Calculate LOG(value) and aggregate it to SUM_OF_LOGS.
        DoubleWritable logValue = new DoubleWritable(logValueDouble);
        aggregate(LinkRankComputation.SUM_OF_LOGS, logValue);
    } else if (superStep == maxSteps - 2) {
        /* Skip one superstep so the WorkerContext can finish aggregating
         * SUM_OF_LOGS, then aggregate this vertex's squared deviation from
         * AVG_OF_LOGS into SUM_OF_DEVS; the WorkerContext turns SUM_OF_DEVS
         * into the standard deviation in step maxSteps - 1.
         */
        double squaredDeviation = Math.pow(logValueDouble - logAvg.get(), 2);
        DoubleWritable devWritable = new DoubleWritable(squaredDeviation);
        aggregate(LinkRankComputation.SUM_OF_DEVS, devWritable);
    } else if (superStep == maxSteps) {
        /* Build a normal distribution from AVG_OF_LOGS and the standard
         * deviation, evaluate the CDF at this vertex's log value, scale it,
         * and store the result as the new score.
         */
        double newValue = 1.0d;
        double stdevValue = stdev.get();
        if (stdevValue == 0.0d) {
            stdevValue = 1e-10; // guard against a degenerate distribution
        }
        NormalDistribution dist = new NormalDistributionImpl(
            logAvg.get(), stdevValue);
        try {
            double cumProb = dist.cumulativeProbability(logValueDouble);
            newValue = cumProb * scale;
        } catch (MathException e) {
            e.printStackTrace();
        }
        vertex.setValue(new DoubleWritable(newValue));
    }
}