This article collects typical usage examples of the Java class org.apache.cassandra.utils.EstimatedHistogram. If you are wondering what the EstimatedHistogram class does and how to use it, the curated class examples below should help.
The EstimatedHistogram class belongs to the org.apache.cassandra.utils package. Fifteen code examples are shown below, ordered by popularity by default.
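Before the individual examples, a minimal self-contained sketch of the class in action may help orient readers. This sketch is illustrative rather than taken from the collection: it assumes the Cassandra utils classes are on the classpath and that EstimatedHistogram exposes an add(long) recorder alongside the percentile(), min() and max() accessors used in Example 12 below.

import org.apache.cassandra.utils.EstimatedHistogram;

public class EstimatedHistogramSketch
{
    public static void main(String[] args)
    {
        // The no-arg constructor allocates the default (~90) exponentially
        // spaced buckets; see the "default is 90 offsets" comment in Example 14.
        EstimatedHistogram h = new EstimatedHistogram();
        for (long latencyMicros : new long[]{ 12, 35, 35, 120, 450, 9000 })
            h.add(latencyMicros);

        // percentile() returns a bucket boundary, hence "estimated".
        System.out.println("p50 ~ " + h.percentile(0.50));
        System.out.println("p99 ~ " + h.percentile(0.99));
        System.out.println("min ~ " + h.min() + ", max ~ " + h.max());
    }
}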
Example 1: DecayingEstimatedHistogramReservoir
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
@VisibleForTesting
DecayingEstimatedHistogramReservoir(boolean considerZeroes, int bucketCount, Clock clock)
{
    if (bucketCount == DEFAULT_BUCKET_COUNT)
    {
        if (considerZeroes)
        {
            bucketOffsets = DEFAULT_WITH_ZERO_BUCKET_OFFSETS;
        }
        else
        {
            bucketOffsets = DEFAULT_WITHOUT_ZERO_BUCKET_OFFSETS;
        }
    }
    else
    {
        bucketOffsets = EstimatedHistogram.newOffsets(bucketCount, considerZeroes);
    }
    decayingBuckets = new AtomicLongArray(bucketOffsets.length + 1);
    buckets = new AtomicLongArray(bucketOffsets.length + 1);
    this.clock = clock;
    decayLandmark = clock.getTime();
}
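Note the bucketOffsets.length + 1 sizing of both bucket arrays: the extra trailing slot presumably acts as the overflow bucket for values beyond the last offset, mirroring EstimatedHistogram's own layout (compare isOverflowed() in Example 12).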
Example 2: TermHistogram
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public TermHistogram(EstimatedHistogram histogram,
                     String title,
                     Function<Long, String> offsetName,
                     Function<Long, String> countName)
{
    this(new TreeMap<Number, long[]>()
    {
        {
            long[] counts = histogram.getBuckets(false);
            long[] offsets = histogram.getBucketOffsets();
            // Keep only the non-empty buckets, keyed by their offset.
            for (int i = 0; i < counts.length; i++)
            {
                long e = counts[i];
                if (e > 0)
                {
                    put(offsets[i], new long[] { e });
                }
            }
        }
    }, title, offsetName, countName);
}
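Assuming the boolean argument of getBuckets is a reset flag (as its use here suggests), passing false reads the bucket counts without clearing them, while true would zero the histogram after the read.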
Example 3: printProxyHistograms
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
private void printProxyHistograms(PrintStream output)
{
    StorageProxyMBean sp = this.probe.getSpProxy();
    long[] offsets = new EstimatedHistogram().getBucketOffsets();
    long[] rrlh = sp.getRecentReadLatencyHistogramMicros();
    long[] rwlh = sp.getRecentWriteLatencyHistogramMicros();
    long[] rrnglh = sp.getRecentRangeLatencyHistogramMicros();
    output.println("proxy histograms");
    output.println(String.format("%-10s%18s%18s%18s",
                                 "Offset", "Read Latency", "Write Latency", "Range Latency"));
    for (int i = 0; i < offsets.length; i++)
    {
        output.println(String.format("%-10d%18s%18s%18s",
                                     offsets[i],
                                     (i < rrlh.length ? rrlh[i] : "0"),
                                     (i < rwlh.length ? rwlh[i] : "0"),
                                     (i < rrnglh.length ? rrnglh[i] : "0")));
    }
}
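The i < rrlh.length guards matter because the arrays returned over JMX may be shorter than the freshly constructed default offsets array; any missing tail simply prints as 0.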
Example 4: legacySerialize
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
/**
 * Used to serialize to an old version - needed to be able to update sstable level without a full compaction.
 *
 * @deprecated will be removed when it is assumed that the minimum upgrade-from-version is the version that this
 * patch made it into
 *
 * @param sstableStats the metadata to serialize
 * @param ancestors generations of the sstables this sstable was compacted from
 * @param legacyDesc descriptor for the legacy version being written
 * @param out destination for the serialized metadata
 * @throws IOException
 */
@Deprecated
public void legacySerialize(SSTableMetadata sstableStats, Set<Integer> ancestors, Descriptor legacyDesc, DataOutput out) throws IOException
{
    EstimatedHistogram.serializer.serialize(sstableStats.estimatedRowSize, out);
    EstimatedHistogram.serializer.serialize(sstableStats.estimatedColumnCount, out);
    ReplayPosition.serializer.serialize(sstableStats.replayPosition, out);
    out.writeLong(sstableStats.minTimestamp);
    out.writeLong(sstableStats.maxTimestamp);
    if (legacyDesc.version.tracksMaxLocalDeletionTime)
        out.writeInt(sstableStats.maxLocalDeletionTime);
    if (legacyDesc.version.hasBloomFilterFPChance)
        out.writeDouble(sstableStats.bloomFilterFPChance);
    out.writeDouble(sstableStats.compressionRatio);
    out.writeUTF(sstableStats.partitioner);
    out.writeInt(ancestors.size());
    for (Integer g : ancestors)
        out.writeInt(g);
    StreamingHistogram.serializer.serialize(sstableStats.estimatedTombstoneDropTime, out);
    out.writeInt(sstableStats.sstableLevel);
    if (legacyDesc.version.tracksMaxMinColumnNames)
        serializeMinMaxColumnNames(sstableStats.minColumnNames, sstableStats.maxColumnNames, out);
}
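The write order here must match the legacy reader byte for byte, which is why the version flags (tracksMaxLocalDeletionTime, hasBloomFilterFPChance, tracksMaxMinColumnNames) gate individual fields rather than whole sections.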
Example 5: SSTableMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
private SSTableMetadata(EstimatedHistogram rowSizes,
                        EstimatedHistogram columnCounts,
                        ReplayPosition replayPosition,
                        long minTimestamp,
                        long maxTimestamp,
                        double cr,
                        String partitioner,
                        StreamingHistogram estimatedTombstoneDropTime)
{
    this.estimatedRowSize = rowSizes;
    this.estimatedColumnCount = columnCounts;
    this.replayPosition = replayPosition;
    this.minTimestamp = minTimestamp;
    this.maxTimestamp = maxTimestamp;
    this.compressionRatio = cr;
    this.partitioner = partitioner;
    this.estimatedTombstoneDropTime = estimatedTombstoneDropTime;
}
Example 6: StatsMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public StatsMetadata(EstimatedHistogram estimatedRowSize,
                     EstimatedHistogram estimatedColumnCount,
                     ReplayPosition replayPosition,
                     long minTimestamp,
                     long maxTimestamp,
                     int maxLocalDeletionTime,
                     double compressionRatio,
                     StreamingHistogram estimatedTombstoneDropTime,
                     int sstableLevel,
                     List<ByteBuffer> minColumnNames,
                     List<ByteBuffer> maxColumnNames)
{
    this.estimatedRowSize = estimatedRowSize;
    this.estimatedColumnCount = estimatedColumnCount;
    this.replayPosition = replayPosition;
    this.minTimestamp = minTimestamp;
    this.maxTimestamp = maxTimestamp;
    this.maxLocalDeletionTime = maxLocalDeletionTime;
    this.compressionRatio = compressionRatio;
    this.estimatedTombstoneDropTime = estimatedTombstoneDropTime;
    this.sstableLevel = sstableLevel;
    this.minColumnNames = minColumnNames;
    this.maxColumnNames = maxColumnNames;
}
Example 7: SSTable
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
protected SSTable(Descriptor descriptor, Set<Component> components, CFMetaData metadata, ReplayPosition replayPosition, IPartitioner partitioner, EstimatedHistogram rowSizes, EstimatedHistogram columnCounts)
{
    // In almost all cases metadata shouldn't be null, but allowing null makes it possible to create a mostly
    // functional SSTable without a full schema definition. SSTableLoader uses that ability.
    assert descriptor != null;
    assert components != null;
    assert replayPosition != null;
    assert partitioner != null;
    assert rowSizes != null;
    assert columnCounts != null;
    this.descriptor = descriptor;
    Set<Component> dataComponents = new HashSet<Component>(components);
    for (Component component : components)
        assert component.type != Component.Type.COMPACTED_MARKER;
    this.components = Collections.unmodifiableSet(dataComponents);
    this.metadata = metadata;
    this.replayPosition = replayPosition;
    this.partitioner = partitioner;
    estimatedRowSize = rowSizes;
    estimatedColumnCount = columnCounts;
}
Example 8: StatsMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public StatsMetadata(EstimatedHistogram estimatedPartitionSize,
                     EstimatedHistogram estimatedColumnCount,
                     IntervalSet<CommitLogPosition> commitLogIntervals,
                     long minTimestamp,
                     long maxTimestamp,
                     int minLocalDeletionTime,
                     int maxLocalDeletionTime,
                     int minTTL,
                     int maxTTL,
                     double compressionRatio,
                     StreamingHistogram estimatedTombstoneDropTime,
                     int sstableLevel,
                     List<ByteBuffer> minClusteringValues,
                     List<ByteBuffer> maxClusteringValues,
                     boolean hasLegacyCounterShards,
                     long repairedAt,
                     long totalColumnsSet,
                     long totalRows)
{
    this.estimatedPartitionSize = estimatedPartitionSize;
    this.estimatedColumnCount = estimatedColumnCount;
    this.commitLogIntervals = commitLogIntervals;
    this.minTimestamp = minTimestamp;
    this.maxTimestamp = maxTimestamp;
    this.minLocalDeletionTime = minLocalDeletionTime;
    this.maxLocalDeletionTime = maxLocalDeletionTime;
    this.minTTL = minTTL;
    this.maxTTL = maxTTL;
    this.compressionRatio = compressionRatio;
    this.estimatedTombstoneDropTime = estimatedTombstoneDropTime;
    this.sstableLevel = sstableLevel;
    this.minClusteringValues = minClusteringValues;
    this.maxClusteringValues = maxClusteringValues;
    this.hasLegacyCounterShards = hasLegacyCounterShards;
    this.repairedAt = repairedAt;
    this.totalColumnsSet = totalColumnsSet;
    this.totalRows = totalRows;
}
Example 9: getSSTableMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public List<SSTableMetadata> getSSTableMetadata(String ksName, String cfName) {
    ColumnFamilyStore cfStore = getStore(ksName, cfName);
    Collection<SSTableReader> tables = cfStore.getLiveSSTables();
    List<SSTableMetadata> metaData = new ArrayList<>(tables.size());
    for (SSTableReader table : tables) {
        SSTableMetadata tableMetadata = new SSTableMetadata();
        File dataFile = new File(table.descriptor.filenameFor(Component.DATA));
        tableMetadata.filename = dataFile.getName();
        tableMetadata.generation = table.descriptor.generation;
        try {
            tableMetadata.fileTimestamp = Files.getLastModifiedTime(dataFile.toPath()).toMillis();
        } catch (IOException e) {
            tableMetadata.fileTimestamp = 0;
        }
        tableMetadata.minTimestamp = table.getMinTimestamp();
        tableMetadata.maxTimestamp = table.getMaxTimestamp();
        tableMetadata.diskLength = table.onDiskLength();
        tableMetadata.uncompressedLength = table.uncompressedLength();
        tableMetadata.keys = table.estimatedKeys();
        EstimatedHistogram rowSizeHistogram = table.getEstimatedPartitionSize();
        tableMetadata.maxRowSize = rowSizeHistogram.max();
        tableMetadata.avgRowSize = rowSizeHistogram.mean();
        EstimatedHistogram columnCountHistogram = table.getEstimatedColumnCount();
        tableMetadata.maxColumnCount = columnCountHistogram.max();
        tableMetadata.avgColumnCount = columnCountHistogram.mean();
        tableMetadata.droppableTombstones = table.getDroppableTombstonesBefore(NOW_SECONDS - table.metadata.params.gcGraceSeconds);
        tableMetadata.level = table.getSSTableLevel();
        tableMetadata.isRepaired = table.isRepaired();
        tableMetadata.repairedAt = table.getSSTableMetadata().repairedAt;
        metaData.add(tableMetadata);
    }
    return metaData;
}
Example 10: getSSTableMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public List<SSTableMetadata> getSSTableMetadata(String ksName, String cfName) {
    ColumnFamilyStore cfStore = getStore(ksName, cfName);
    Collection<SSTableReader> tables = cfStore.getSSTables();
    List<SSTableMetadata> metaData = new ArrayList<>(tables.size());
    for (SSTableReader table : tables) {
        SSTableMetadata tableMetadata = new SSTableMetadata();
        File dataFile = new File(table.descriptor.filenameFor(Component.DATA));
        tableMetadata.filename = dataFile.getName();
        tableMetadata.generation = table.descriptor.generation;
        try {
            tableMetadata.fileTimestamp = Files.getLastModifiedTime(dataFile.toPath()).toMillis();
        } catch (IOException e) {
            tableMetadata.fileTimestamp = 0;
        }
        tableMetadata.minTimestamp = table.getMinTimestamp();
        tableMetadata.maxTimestamp = table.getMaxTimestamp();
        tableMetadata.diskLength = table.onDiskLength();
        tableMetadata.uncompressedLength = table.uncompressedLength();
        tableMetadata.keys = table.estimatedKeys();
        EstimatedHistogram rowSizeHistogram = table.getEstimatedRowSize();
        tableMetadata.maxRowSize = rowSizeHistogram.max();
        tableMetadata.avgRowSize = rowSizeHistogram.mean();
        EstimatedHistogram columnCountHistogram = table.getEstimatedColumnCount();
        tableMetadata.maxColumnCount = columnCountHistogram.max();
        tableMetadata.avgColumnCount = columnCountHistogram.mean();
        tableMetadata.droppableTombstones = table.getDroppableTombstonesBefore(Util.NOW_SECONDS - table.metadata.getGcGraceSeconds());
        tableMetadata.level = table.getSSTableLevel();
        tableMetadata.isRepaired = table.isRepaired();
        tableMetadata.repairedAt = table.getSSTableMetadata().repairedAt;
        metaData.add(tableMetadata);
    }
    return metaData;
}
Example 11: getSSTableMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public List<SSTableMetadata> getSSTableMetadata(String ksName, String cfName) {
    ColumnFamilyStore cfStore = getStore(ksName, cfName);
    Collection<SSTableReader> tables = cfStore.getSSTables();
    List<SSTableMetadata> metaData = new ArrayList<>(tables.size());
    for (SSTableReader table : tables) {
        SSTableMetadata tableMetadata = new SSTableMetadata();
        File dataFile = new File(table.descriptor.filenameFor(Component.DATA));
        tableMetadata.filename = dataFile.getName();
        tableMetadata.generation = table.descriptor.generation;
        try {
            tableMetadata.fileTimestamp = Files.getLastModifiedTime(dataFile.toPath()).toMillis();
        } catch (IOException e) {
            tableMetadata.fileTimestamp = 0;
        }
        tableMetadata.minTimestamp = table.getMinTimestamp();
        tableMetadata.maxTimestamp = table.getMaxTimestamp();
        tableMetadata.diskLength = table.onDiskLength();
        tableMetadata.uncompressedLength = table.uncompressedLength();
        tableMetadata.keys = table.estimatedKeys();
        EstimatedHistogram rowSizeHistogram = table.getEstimatedRowSize();
        tableMetadata.maxRowSize = rowSizeHistogram.max();
        tableMetadata.avgRowSize = rowSizeHistogram.mean();
        EstimatedHistogram columnCountHistogram = table.getEstimatedColumnCount();
        tableMetadata.maxColumnCount = columnCountHistogram.max();
        tableMetadata.avgColumnCount = columnCountHistogram.mean();
        tableMetadata.droppableTombstones = table.getDroppableTombstonesBefore(Util.NOW_SECONDS - table.metadata.getGcGraceSeconds());
        tableMetadata.level = table.getSSTableLevel();
        tableMetadata.isRepaired = false;
        metaData.add(tableMetadata);
    }
    return metaData;
}
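Examples 9 through 11 are the same helper written against different Cassandra APIs: the newest variant uses getLiveSSTables() and getEstimatedPartitionSize(), the middle one uses getSSTables() and getEstimatedRowSize() with metadata.getGcGraceSeconds(), and the oldest hard-codes isRepaired = false, apparently because its SSTable metadata predates repair tracking.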
Example 12: metricPercentilesAsArray
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public double[] metricPercentilesAsArray(long[] counts)
{
    double[] result = new double[7]; // [p50, p75, p95, p98, p99, min, max]
    if (isEmpty(counts))
    {
        Arrays.fill(result, Double.NaN);
        return result;
    }
    double[] offsetPercentiles = new double[]{ 0.5, 0.75, 0.95, 0.98, 0.99 };
    long[] offsets = new EstimatedHistogram(counts.length).getBucketOffsets();
    EstimatedHistogram metric = new EstimatedHistogram(offsets, counts);
    if (metric.isOverflowed())
    {
        System.err.println(String.format("EstimatedHistogram overflowed larger than %s, unable to calculate percentiles",
                                         offsets[offsets.length - 1]));
        Arrays.fill(result, Double.NaN);
    }
    else
    {
        for (int i = 0; i < offsetPercentiles.length; i++)
            result[i] = metric.percentile(offsetPercentiles[i]);
    }
    result[5] = metric.min();
    result[6] = metric.max();
    return result;
}
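Because min and max are assigned after the if/else, slots 5 and 6 of the result are populated even when the histogram has overflowed; only the five percentile slots remain NaN in that case.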
Example 13: StatsMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
public StatsMetadata(EstimatedHistogram estimatedRowSize,
                     EstimatedHistogram estimatedColumnCount,
                     ReplayPosition replayPosition,
                     long minTimestamp,
                     long maxTimestamp,
                     int maxLocalDeletionTime,
                     double compressionRatio,
                     StreamingHistogram estimatedTombstoneDropTime,
                     int sstableLevel,
                     List<ByteBuffer> minColumnNames,
                     List<ByteBuffer> maxColumnNames,
                     boolean hasLegacyCounterShards,
                     long repairedAt)
{
    this.estimatedRowSize = estimatedRowSize;
    this.estimatedColumnCount = estimatedColumnCount;
    this.replayPosition = replayPosition;
    this.minTimestamp = minTimestamp;
    this.maxTimestamp = maxTimestamp;
    this.maxLocalDeletionTime = maxLocalDeletionTime;
    this.compressionRatio = compressionRatio;
    this.estimatedTombstoneDropTime = estimatedTombstoneDropTime;
    this.sstableLevel = sstableLevel;
    this.minColumnNames = minColumnNames;
    this.maxColumnNames = maxColumnNames;
    this.hasLegacyCounterShards = hasLegacyCounterShards;
    this.repairedAt = repairedAt;
}
Example 14: printCfHistograms
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
private void printCfHistograms(String keySpace, String columnFamily, PrintStream output)
{
    ColumnFamilyStoreMBean store = this.probe.getCfsProxy(keySpace, columnFamily);
    // default is 90 offsets
    long[] offsets = new EstimatedHistogram().getBucketOffsets();
    long[] rrlh = store.getRecentReadLatencyHistogramMicros();
    long[] rwlh = store.getRecentWriteLatencyHistogramMicros();
    long[] sprh = store.getRecentSSTablesPerReadHistogram();
    long[] ersh = store.getEstimatedRowSizeHistogram();
    long[] ecch = store.getEstimatedColumnCountHistogram();
    output.println(String.format("%s/%s histograms", keySpace, columnFamily));
    output.println(String.format("%-10s%10s%18s%18s%18s%18s",
                                 "Offset", "SSTables", "Write Latency", "Read Latency", "Partition Size", "Cell Count"));
    output.println(String.format("%-10s%10s%18s%18s%18s%18s",
                                 "", "", "(micros)", "(micros)", "(bytes)", ""));
    for (int i = 0; i < offsets.length; i++)
    {
        output.println(String.format("%-10d%10s%18s%18s%18s%18s",
                                     offsets[i],
                                     (i < sprh.length ? sprh[i] : "0"),
                                     (i < rwlh.length ? rwlh[i] : "0"),
                                     (i < rrlh.length ? rrlh[i] : "0"),
                                     (i < ersh.length ? ersh[i] : "0"),
                                     (i < ecch.length ? ecch[i] : "0")));
    }
}
Example 15: SSTableMetadata
import org.apache.cassandra.utils.EstimatedHistogram; // import the required package/class
private SSTableMetadata(EstimatedHistogram rowSizes,
                        EstimatedHistogram columnCounts,
                        ReplayPosition replayPosition,
                        long minTimestamp,
                        long maxTimestamp,
                        int maxLocalDeletionTime,
                        double bloomFilterFPChance,
                        double compressionRatio,
                        String partitioner,
                        StreamingHistogram estimatedTombstoneDropTime,
                        int sstableLevel,
                        List<ByteBuffer> minColumnNames,
                        List<ByteBuffer> maxColumnNames)
{
    this.estimatedRowSize = rowSizes;
    this.estimatedColumnCount = columnCounts;
    this.replayPosition = replayPosition;
    this.minTimestamp = minTimestamp;
    this.maxTimestamp = maxTimestamp;
    this.maxLocalDeletionTime = maxLocalDeletionTime;
    this.bloomFilterFPChance = bloomFilterFPChance;
    this.compressionRatio = compressionRatio;
    this.partitioner = partitioner;
    this.estimatedTombstoneDropTime = estimatedTombstoneDropTime;
    this.sstableLevel = sstableLevel;
    this.minColumnNames = minColumnNames;
    this.maxColumnNames = maxColumnNames;
}