This article collects typical usage examples of the Java method org.apache.cassandra.utils.EstimatedHistogram.min. If you are unsure what EstimatedHistogram.min does or how to call it, the selected code examples below should help; you can also look at the enclosing class, org.apache.cassandra.utils.EstimatedHistogram, for further context.
Two code examples of the EstimatedHistogram.min method are shown below.
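Before the examples, here is a minimal, self-contained sketch (not part of the original listing) showing where min() sits in the EstimatedHistogram API; the class name EstimatedHistogramMinDemo and the sample values are made up for illustration.
import org.apache.cassandra.utils.EstimatedHistogram;

public class EstimatedHistogramMinDemo
{
    public static void main(String[] args)
    {
        // Default constructor uses the standard (roughly exponential) bucket layout.
        EstimatedHistogram histogram = new EstimatedHistogram();

        for (long sample : new long[] { 12, 47, 330, 4096, 15000 })
            histogram.add(sample);

        // min() and max() return bucket-boundary estimates for the smallest and
        // largest recorded values, not the exact raw samples.
        System.out.println("min ~ " + histogram.min());
        System.out.println("max ~ " + histogram.max());
        System.out.println("p95 ~ " + histogram.percentile(0.95));
    }
}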
Example 1: metricPercentilesAsArray
import org.apache.cassandra.utils.EstimatedHistogram; // import the class the method depends on
import java.util.Arrays;

public double[] metricPercentilesAsArray(long[] counts)
{
    double[] result = new double[7];

    // isEmpty(...) is a NodeProbe helper that returns true when every bucket count is zero
    if (isEmpty(counts))
    {
        Arrays.fill(result, Double.NaN);
        return result;
    }

    double[] offsetPercentiles = new double[] { 0.5, 0.75, 0.95, 0.98, 0.99 };
    long[] offsets = new EstimatedHistogram(counts.length).getBucketOffsets();
    EstimatedHistogram metric = new EstimatedHistogram(offsets, counts);

    if (metric.isOverflowed())
    {
        System.err.println(String.format("EstimatedHistogram overflowed larger than %s, unable to calculate percentiles",
                                         offsets[offsets.length - 1]));
        for (int i = 0; i < result.length; i++)
            result[i] = Double.NaN;
    }
    else
    {
        for (int i = 0; i < offsetPercentiles.length; i++)
            result[i] = metric.percentile(offsetPercentiles[i]);
    }

    // indices 0-4 hold the 50th/75th/95th/98th/99th percentiles; 5 and 6 hold min and max
    result[5] = metric.min();
    result[6] = metric.max();
    return result;
}
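A hypothetical caller might unpack the returned array as shown below; the probe and counts variables are assumptions (a NodeProbe instance and a bucket-count array read over JMX, as in Example 2), not part of the original example.
// Sketch only: 'probe' is assumed to be a NodeProbe and 'counts' a histogram
// bucket array obtained over JMX. Indices 0-4 are percentiles, 5 is min, 6 is max.
double[] stats = probe.metricPercentilesAsArray(counts);
System.out.println(String.format("p50=%.2f p99=%.2f min=%.0f max=%.0f",
                                 stats[0], stats[4], stats[5], stats[6]));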
Example 2: execute
import org.apache.cassandra.utils.EstimatedHistogram; // import the class the method depends on
// The snippet also relies on static imports declared elsewhere in the enclosing class,
// e.g. com.google.common.base.Preconditions.checkArgument and java.lang.String.format.

@Override
public void execute(NodeProbe probe)
{
    checkArgument(args.size() == 2, "cfhistograms requires ks and cf args");
    String keyspace = args.get(0);
    String cfname = args.get(1);

    // calculate percentiles of row size and column count
    long[] estimatedRowSize = (long[]) probe.getColumnFamilyMetric(keyspace, cfname, "EstimatedRowSizeHistogram");
    long[] estimatedColumnCount = (long[]) probe.getColumnFamilyMetric(keyspace, cfname, "EstimatedColumnCountHistogram");

    long[] bucketOffsets = new EstimatedHistogram().getBucketOffsets();
    EstimatedHistogram rowSizeHist = new EstimatedHistogram(bucketOffsets, estimatedRowSize);
    EstimatedHistogram columnCountHist = new EstimatedHistogram(bucketOffsets, estimatedColumnCount);

    // build arrays to store percentile values: indices 0-4 are percentiles, 5 is min, 6 is max
    double[] estimatedRowSizePercentiles = new double[7];
    double[] estimatedColumnCountPercentiles = new double[7];
    double[] offsetPercentiles = new double[]{ 0.5, 0.75, 0.95, 0.98, 0.99 };
    for (int i = 0; i < offsetPercentiles.length; i++)
    {
        estimatedRowSizePercentiles[i] = rowSizeHist.percentile(offsetPercentiles[i]);
        estimatedColumnCountPercentiles[i] = columnCountHist.percentile(offsetPercentiles[i]);
    }

    // min value
    estimatedRowSizePercentiles[5] = rowSizeHist.min();
    estimatedColumnCountPercentiles[5] = columnCountHist.min();
    // max value
    estimatedRowSizePercentiles[6] = rowSizeHist.max();
    estimatedColumnCountPercentiles[6] = columnCountHist.max();

    String[] percentiles = new String[]{ "50%", "75%", "95%", "98%", "99%", "Min", "Max" };
    double[] readLatency = probe.metricPercentilesAsArray((JmxReporter.HistogramMBean) probe.getColumnFamilyMetric(keyspace, cfname, "ReadLatency"));
    double[] writeLatency = probe.metricPercentilesAsArray((JmxReporter.TimerMBean) probe.getColumnFamilyMetric(keyspace, cfname, "WriteLatency"));
    double[] sstablesPerRead = probe.metricPercentilesAsArray((JmxReporter.HistogramMBean) probe.getColumnFamilyMetric(keyspace, cfname, "SSTablesPerReadHistogram"));

    System.out.println(format("%s/%s histograms", keyspace, cfname));
    System.out.println(format("%-10s%10s%18s%18s%18s%18s",
                              "Percentile", "SSTables", "Write Latency", "Read Latency", "Partition Size", "Cell Count"));
    System.out.println(format("%-10s%10s%18s%18s%18s%18s",
                              "", "", "(micros)", "(micros)", "(bytes)", ""));

    for (int i = 0; i < percentiles.length; i++)
    {
        System.out.println(format("%-10s%10.2f%18.2f%18.2f%18.0f%18.0f",
                                  percentiles[i],
                                  sstablesPerRead[i],
                                  writeLatency[i],
                                  readLatency[i],
                                  estimatedRowSizePercentiles[i],
                                  estimatedColumnCountPercentiles[i]));
    }
    System.out.println();
}