This article collects typical usage examples of the Java method org.apache.commons.math.stat.descriptive.DescriptiveStatistics.getMean. If you are wondering what DescriptiveStatistics.getMean does, how to call it, or what it looks like in real code, the curated samples below may help. You can also read more about the enclosing class, org.apache.commons.math.stat.descriptive.DescriptiveStatistics.
The sections below present 13 code examples of DescriptiveStatistics.getMean, sorted by popularity by default.
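Before the examples, here is a minimal self-contained sketch of the typical call pattern (the class name GetMeanDemo and the sample values are illustrative, not taken from any of the projects below): values are accumulated with addValue, and getMean() then returns the arithmetic mean of everything added so far.

import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;

public class GetMeanDemo {
    public static void main(String[] args) {
        // Accumulate a few sample values, then query summary statistics.
        DescriptiveStatistics stats = new DescriptiveStatistics();
        for (double v : new double[] { 1.0, 2.0, 3.0, 4.0 }) {
            stats.addValue(v);
        }
        System.out.println("mean   = " + stats.getMean());              // 2.5
        System.out.println("stddev = " + stats.getStandardDeviation()); // bias-corrected sample standard deviation
    }
}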
Example 1: computeStatistics
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
/**
* Compute the current aggregate statistics of the
* accumulated results.
*
* @return the current aggregate statistics
*/
public AggregateStatistics computeStatistics() {
    DescriptiveStatistics accuracy = new DescriptiveStatistics();
    DescriptiveStatistics errorRate = new DescriptiveStatistics();
    for (CMResult<CLASS> result : matrices) {
        ConfusionMatrix<CLASS> m = result.getMatrix();
        accuracy.addValue(m.getAccuracy());
        errorRate.addValue(m.getErrorRate());
    }
    AggregateStatistics s = new AggregateStatistics();
    s.meanAccuracy = accuracy.getMean();
    s.stddevAccuracy = accuracy.getStandardDeviation();
    s.meanErrorRate = errorRate.getMean();
    s.stddevErrorRate = errorRate.getStandardDeviation();
    return s;
}
Example 2: normalize
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
/**
* Normalize (standardize) the series so that it ends up with a mean of 0 and a standard deviation of 1.
*
* @param sample Sample to normalize.
* @return normalized (standardized) sample.
* @since 2.2
*/
public static double[] normalize(final double[] sample) {
    DescriptiveStatistics stats = new DescriptiveStatistics();
    // Add the data from the series to stats
    for (int i = 0; i < sample.length; i++) {
        stats.addValue(sample[i]);
    }
    // Compute mean and standard deviation
    double mean = stats.getMean();
    double standardDeviation = stats.getStandardDeviation();
    // initialize the standardizedSample, which has the same length as the sample
    double[] standardizedSample = new double[sample.length];
    for (int i = 0; i < sample.length; i++) {
        // z = (x - mean) / standardDeviation
        standardizedSample[i] = (sample[i] - mean) / standardDeviation;
    }
    return standardizedSample;
}
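As a quick sanity check (illustrative values, not from the original project): normalize(new double[] { 2, 4, 6 }) yields { -1, 0, 1 }, since the mean is 4 and the sample standard deviation returned by getStandardDeviation() is 2; the standardized series then has mean 0 and standard deviation 1, as the Javadoc promises.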
Example 3: APARegionStatistics
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
public APARegionStatistics(RealMatrix data, int regionWidth) {
    int max = data.getColumnDimension();
    int midPoint = max / 2;
    double centralVal = data.getEntry(midPoint, midPoint);
    // NOTE - indices are inclusive in Java, but in Python the second index is not inclusive
    peak2mean = centralVal / ((sum(data.getData()) - centralVal) / (data.getColumnDimension() - 1));
    double avgUL = mean(data.getSubMatrix(0, regionWidth - 1, 0, regionWidth - 1).getData());
    peak2UL = centralVal / avgUL;
    avgUR = mean(data.getSubMatrix(0, regionWidth - 1, max - regionWidth, max - 1).getData());
    peak2UR = centralVal / avgUR;
    double avgLL = mean(data.getSubMatrix(max - regionWidth, max - 1, 0, regionWidth - 1).getData());
    peak2LL = centralVal / avgLL;
    double avgLR = mean(data.getSubMatrix(max - regionWidth, max - 1, max - regionWidth, max - 1).getData());
    peak2LR = centralVal / avgLR;
    DescriptiveStatistics yStats = statistics(data.getSubMatrix(max - regionWidth, max - 1, 0, regionWidth - 1).getData());
    ZscoreLL = (centralVal - yStats.getMean()) / yStats.getStandardDeviation();
}
Example 4: buildFeatureObject
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
public Feature buildFeatureObject(DescriptiveStatistics summary, String name) {
    double geometricMean = summary.getGeometricMean();
    double kurtosis = summary.getKurtosis();
    double max = summary.getMax();
    double mean = summary.getMean();
    double min = summary.getMin();
    double skewness = summary.getSkewness();
    double standardDeviation = summary.getStandardDeviation();
    double sum = summary.getSum();
    double sumsq = summary.getSumsq();
    double variance = summary.getVariance();
    double[] values = summary.getValues();
    Feature feature = new Feature(name, name, null, mean, variance, skewness);
    LOG.log(Level.INFO, summary.toString());
    return feature;
}
Example 5: drawNormalDistributionChart
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
public void drawNormalDistributionChart(double[] values) {
    DescriptiveStatistics stats = new DescriptiveStatistics();
    // Add the data from the array
    for (int i = 0; i < values.length; i++) {
        stats.addValue(values[i]);
    }
    // Compute some statistics
    double mean = stats.getMean();
    double std = stats.getStandardDeviation();
    double skewness = stats.getSkewness();
    double variance = stats.getVariance();
    double kurtosis = stats.getKurtosis();
    System.out.println(mean + "\t" + std + "\t" + skewness + "\t" + variance + "\t" + kurtosis);
    stats.clear();
}
Example 6: AnalysisResultsModel
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
public AnalysisResultsModel(ArrayList<Double> firesPerCenturyPerSim, SegmentModel segment, int numberOfSamples) {
    this.segment = segment;
    this.numberOfSamples = numberOfSamples;
    // Generate Apache Commons descriptive statistics
    stats = new DescriptiveStatistics();
    for (Double val : firesPerCenturyPerSim)
    {
        stats.addValue(val);
    }
    meanEventsPerCentury = stats.getMean();
    std = stats.getStandardDeviation();
    median = stats.getPercentile(50);
    CI95 = STDEV_MULTIPLIER_FOR_95 * std;
    CI99 = STDEV_MULTIPLIER_FOR_99 * std;
    // Generate Weibull stats
    Weibull weibull = new Weibull(firesPerCenturyPerSim);
    weibullMean = weibull.getMean();
    weibullMedian = weibull.getMedian();
    // TODO Elena to check
    weibullCI95Lower = weibull.getExceedencePercentile(5.0);
    weibullCI95Upper = weibull.getExceedencePercentile(95.0);
    weibullCI99 = weibull.getExceedencePercentile(99.0) - weibullMedian;
}
Example 7: getBin_spectrum
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
public double[] getBin_spectrum(int shift) {
    ArrayList<Double> bin_spec_al = new ArrayList<Double>();
    double binSize = (fragment_tolerance * 2),
            upperLimit = max_value + 0.00001;
    for (double lowerLimit = min_value; lowerLimit < upperLimit; lowerLimit = lowerLimit + binSize) {
        double tmp_intensity_bin = 0;
        DescriptiveStatistics obj = new DescriptiveStatistics();
        for (Peak p : peakList) {
            double mz = p.getMz() + shift;
            if (mz >= lowerLimit && mz < lowerLimit + binSize) {
                obj.addValue(p.intensity);
            }
        }
        if (obj.getN() > 0) {
            if (intensities_sum_or_mean_or_median == 0) {
                tmp_intensity_bin = obj.getSum();
            } else if (intensities_sum_or_mean_or_median == 1) {
                tmp_intensity_bin = obj.getMean();
            } else if (intensities_sum_or_mean_or_median == 2) {
                tmp_intensity_bin = obj.getPercentile(50);
            }
        }
        // append every bin to the bin spectrum
        bin_spec_al.add(tmp_intensity_bin);
    }
    // convert the ArrayList to a double array
    // record the size of the array
    bin_size = bin_spec_al.size();
    double[] bin_spectrum = new double[bin_spec_al.size()];
    for (int i = 0; i < bin_spec_al.size(); i++) {
        bin_spectrum[i] = bin_spec_al.get(i);
    }
    return bin_spectrum;
}
Example 8: getSkeletonCategoryFromCropper1979
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
private Integer getSkeletonCategoryFromCropper1979(Integer value, DescriptiveStatistics windowStats, Double criticalLevel)
{
    Integer skeletonCategory = 0;
    if (criticalLevel == null) criticalLevel = 0.5;
    double mean = windowStats.getMean();
    double stdev = windowStats.getStandardDeviation();
    double smallRingThreshold = mean - (stdev * criticalLevel);
    int min = (int) windowStats.getMin();
    if (value == min)
    {
        skeletonCategory = 10;
    }
    else if (value > smallRingThreshold)
    {
        skeletonCategory = 0;
    }
    else
    {
        Integer range = (int) (smallRingThreshold - min);
        Integer categoryStepSize = range / 10;
        skeletonCategory = (int) (0 - ((value - smallRingThreshold) / categoryStepSize));
    }
    return skeletonCategory;
}
Example 9: setSurvivalInfo
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
/**
*
* @param title
* @param _siList
* @param variable
*/
public void setSurvivalInfo(ArrayList<String> title, ArrayList<SurvivalInfo> _siList, String variable) {
    this.siList = new ArrayList<SurvivalInfo>();
    this.title = title;
    this.variable = variable;
    minX = 0.0;
    maxX = (double) _siList.size();
    minY = 0.0;
    maxY = null;
    DescriptiveStatistics ds = new DescriptiveStatistics();
    for (SurvivalInfo si : _siList) {
        this.siList.add(si);
        String v = si.getOriginalMetaData(variable);
        Double value = Double.parseDouble(v);
        ds.addValue(value);
        if (maxTime == null || maxTime < si.getTime()) {
            maxTime = si.getTime();
        }
    }
    SurvivalInfoValueComparator sivc = new SurvivalInfoValueComparator(variable);
    Collections.sort(this.siList, sivc);
    mean = ds.getMean();
    minY = ds.getMin();
    maxY = ds.getMax();
    minY = (double) Math.floor(minY);
    maxY = (double) Math.ceil(maxY);
    this.repaint();
}
Example 10: costFromStats
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
/**
* Function to compute a scaled cost using {@link DescriptiveStatistics}. It
* assumes that this is a zero sum set of costs. It assumes that the worst possible case
* is all of the cost concentrated on one region server while the rest have 0.
*
* @param stats the costs
* @return a scaled set of costs.
*/
double costFromStats(DescriptiveStatistics stats) {
    double totalCost = 0;
    double mean = stats.getMean();
    // Compute max as if all region servers had 0 and one had the sum of all costs. This must be
    // a zero sum cost for this to make sense.
    double max = ((stats.getN() - 1) * stats.getMean()) + (stats.getSum() - stats.getMean());
    for (double n : stats.getValues()) {
        totalCost += Math.abs(mean - n);
    }
    return scale(0, max, totalCost);
}
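For intuition (hypothetical numbers, assuming scale(min, max, value) maps value linearly from [min, max] onto [0, 1]): with per-server costs { 4, 0, 0, 0 }, the mean is 1, max works out to (3 * 1) + (4 - 1) = 6, and totalCost is |1 - 4| + 3 * |1 - 0| = 6, so the fully skewed distribution scales to 1.0, while a perfectly even distribution gives totalCost = 0 and therefore a scaled cost of 0.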
Example 11: prepareBinSpectra
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
private ArrayList<double[]> prepareBinSpectra() {
    // first prepare the bin spectra, filled with zeros
    int size = (2 * correctionFactor) + 1;
    ArrayList<double[]> shiftedSpectra = new ArrayList<double[]>(size);
    for (int i = 0; i < size; i++) {
        double[] shiftedSpectrum = new double[bin_size];
        shiftedSpectra.add(shiftedSpectrum);
    }
    // now fill each bin spectrum with the correct mz values.
    double binSize = (fragment_tolerance * 2),
            upperLimit = max_value + 0.00001;
    int current_index = 0;
    for (double lowerLimit = min_value + correctionFactor; lowerLimit < upperLimit - correctionFactor; lowerLimit = lowerLimit + binSize) {
        double tmp_intensity_bin = 0;
        DescriptiveStatistics obj = new DescriptiveStatistics();
        for (Peak p : peakList) {
            double mz = p.getMz();
            if (mz >= lowerLimit && mz < lowerLimit + binSize) {
                obj.addValue(p.intensity);
            }
        }
        if (obj.getN() > 0) {
            if (intensities_sum_or_mean_or_median == 0) {
                tmp_intensity_bin = obj.getSum();
            } else if (intensities_sum_or_mean_or_median == 1) {
                tmp_intensity_bin = obj.getMean();
            } else if (intensities_sum_or_mean_or_median == 2) {
                tmp_intensity_bin = obj.getPercentile(50);
            }
        }
        // write the bin value into every shifted spectrum
        int filling_index = current_index;
        // check every bin spectrum
        for (double[] shifted : shiftedSpectra) {
            shifted[filling_index] = tmp_intensity_bin;
            filling_index++;
        }
        current_index++;
    }
    return shiftedSpectra;
}
Example 12: findLetters
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
/**
* Filter the components to find likely letter candidates.
*
* @param components
* the components to filter
* @param swt
* the swt image
* @param image
* the original image
* @return the potential letter candidates
*/
protected static List<LetterCandidate>
        findLetters(List<ConnectedComponent> components, FImage swt, FImage image, SWTTextDetector.Options options)
{
    final List<LetterCandidate> output = new ArrayList<LetterCandidate>();
    final DescriptiveStatistics stats = new DescriptiveStatistics();
    for (final ConnectedComponent cc : components) {
        // additional check for small area - speeds processing...
        if (cc.pixels.size() < options.minArea)
            continue;
        computeStats(stats, cc, swt);
        final double mean = stats.getMean();
        final double variance = stats.getVariance();
        final double median = stats.getPercentile(50);
        // test variance of stroke width
        if (variance > options.letterVarianceMean * mean)
            continue;
        final Rectangle bb = cc.calculateRegularBoundingBox();
        // test aspect ratio
        final double aspect = Math.max(bb.width, bb.height) / Math.min(bb.width, bb.height);
        if (aspect > options.maxAspectRatio)
            continue;
        // test diameter
        final float diameter = Math.max(bb.width, bb.height);
        if (diameter / median > options.maxDiameterStrokeRatio)
            continue;
        // check occlusion
        int overlapping = 0;
        for (final ConnectedComponent cc2 : components) {
            if (cc2 == cc)
                continue;
            final Rectangle bb2 = cc2.calculateRegularBoundingBox();
            if (bb2.intersectionArea(bb) > 0)
                overlapping++;
        }
        if (overlapping > options.maxNumOverlappingBoxes)
            continue;
        // check height
        if (bb.height < options.minHeight || bb.height > options.maxHeight)
            continue;
        output.add(new LetterCandidate(cc, (float) median, image));
    }
    return output;
}
Example 13: main
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; // import the package/class this method depends on
/**
* @param args
* @throws IOException
*/
public static void main(String[] args) throws IOException {
    //String path = args[0];
    String path = "/home/mgerlich/workspace-3.5/MetFusion2/testdata/Hill/results/2010-06-15_16-49-42/";
    File dir = new File(path);
    //File[] list = dir.listFiles(new MyFileFilter(".vec"));
    File[] results = dir.listFiles(new MyFileFilter("_result.log"));
    Arrays.sort(results);
    //if (list.length != 102 || results.length != 102) {
    if (results.length != 102) {
        System.err.println("wrong number of results files - aborting...");
        System.err.println("expected 102 - was " + results.length + " for _result.log files.");
        //System.exit(-1);
    }
    else System.out.println("expected 102 results found :)");
    String[] cids = new String[results.length];
    int[] worstRanks = new int[results.length];
    int[] threshRanks = new int[results.length];
    int[] threshTiedRanks = new int[results.length];
    int[] weightRanks = new int[results.length];
    int[] weightTiedRanks = new int[results.length];
    for (int i = 0; i < results.length; i++) {
        File f = results[i];
        System.out.println(f);
        BufferedReader br = new BufferedReader(new FileReader(f));
        String line = "";
        while ((line = br.readLine()) != null) {
            /**
             * String header = "## CID\tworstRank\tthresholdRank\tweightedRank\tthresholdTiedRank\tweightedTiedRank\n";
             */
            if (line.startsWith("##") || line.startsWith("CID"))
                continue;
            String[] split = line.split("\t");
            cids[i] = split[0];
            worstRanks[i] = Integer.parseInt(split[1]);
            threshRanks[i] = Integer.parseInt(split[2]);
            weightRanks[i] = Integer.parseInt(split[3]);
            threshTiedRanks[i] = Integer.parseInt(split[4]);
            weightTiedRanks[i] = Integer.parseInt(split[5]);
        }
    }
    // Get a DescriptiveStatistics instance
    DescriptiveStatistics stats = new DescriptiveStatistics();
    // Add the data from the array
    for (int i = 0; i < threshTiedRanks.length; i++) {
        stats.addValue(threshTiedRanks[i]);
    }
    // Compute some statistics
    double mean = stats.getMean();
    double std = stats.getStandardDeviation();
    System.out.println("mean=" + mean + "\tsd=" + std);
    // double mean2 = StatUtils.mean(weightTiedRanks);
    // double std2 = StatUtils.variance(weightTiedRanks);
    // double median = StatUtils.percentile(weightTiedRanks, 0.5);
    //double median = stats.getMedian();
}