本文整理汇总了Java中org.apache.cassandra.metrics.StorageMetrics类的典型用法代码示例。如果您正苦于以下问题:Java StorageMetrics类的具体用法?Java StorageMetrics怎么用?Java StorageMetrics使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
StorageMetrics类属于org.apache.cassandra.metrics包,在下文中一共展示了StorageMetrics类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: run
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Runs the wrapped hint-delivery task. Checked exceptions are rethrown as
 * RuntimeException; regardless of outcome, the per-target and global
 * hints-in-progress counters (incremented when the task was submitted)
 * are released.
 */
public void run()
{
    try
    {
        runMayThrow();
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }
    finally
    {
        // Release both accounting counters taken at submit time; the two
        // counters are independent, so the order here is immaterial.
        getHintsInProgressFor(target).decrementAndGet();
        StorageMetrics.totalHintsInProgress.dec();
    }
}
示例2: startBroadcasting
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Starts periodically disseminating this node's storage load via gossip.
 * The first broadcast is delayed by two gossip intervals (so that we are
 * likely to have a peer to talk to); each subsequent broadcast follows
 * after BROADCAST_INTERVAL milliseconds.
 */
public void startBroadcasting()
{
    Runnable broadcastLoad = () ->
    {
        if (logger.isTraceEnabled())
            logger.trace("Disseminating load info ...");
        Gossiper.instance.addLocalApplicationState(ApplicationState.LOAD,
                                                   StorageService.instance.valueFactory.load(StorageMetrics.load.getCount()));
    };
    ScheduledExecutors.scheduledTasks.scheduleWithFixedDelay(broadcastLoad,
                                                             2 * Gossiper.intervalInMillis,
                                                             BROADCAST_INTERVAL,
                                                             TimeUnit.MILLISECONDS);
}
示例3: addNewSSTablesSize
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Accounts for sstables that just became live: bumps the global storage
 * load and this column family's live/total disk-space metrics by each
 * sstable's on-disk size, and registers this tracker with the sstable.
 *
 * @param newSSTables sstables to start tracking for this column family
 */
private void addNewSSTablesSize(Iterable<SSTableReader> newSSTables)
{
    for (SSTableReader sstable : newSSTables)
    {
        // Parameterized logging: SLF4J only renders the message when debug
        // is enabled, so the explicit isDebugEnabled() guard and the eager
        // String.format are unnecessary.
        logger.debug("adding {} to list of files tracked for {}.{}",
                     sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
        long size = sstable.bytesOnDisk();
        StorageMetrics.load.inc(size);
        cfstore.metric.liveDiskSpaceUsed.inc(size);
        cfstore.metric.totalDiskSpaceUsed.inc(size);
        sstable.setTrackedBy(this);
    }
}
示例4: removeOldSSTablesSize
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Accounts for sstables leaving the live set: decrements the global
 * storage load and this column family's live disk-space metric by each
 * sstable's on-disk size. (totalDiskSpaceUsed is left untouched here; it
 * is only reduced once the files are actually deleted.)
 *
 * @param oldSSTables sstables to stop tracking for this column family
 */
private void removeOldSSTablesSize(Iterable<SSTableReader> oldSSTables)
{
    for (SSTableReader sstable : oldSSTables)
    {
        // Parameterized logging: SLF4J only renders the message when debug
        // is enabled, so the explicit isDebugEnabled() guard and the eager
        // String.format are unnecessary.
        logger.debug("removing {} from list of files tracked for {}.{}",
                     sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
        long size = sstable.bytesOnDisk();
        StorageMetrics.load.dec(size);
        cfstore.metric.liveDiskSpaceUsed.dec(size);
    }
}
示例5: write
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Write a hint for an iterable of target nodes.
 *
 * @param hostIds host ids of the hint's target nodes
 * @param hint the hint to store
 * @throws IllegalStateException if the service has been shut down
 */
public void write(Iterable<UUID> hostIds, Hint hint)
{
    if (isShutDown)
        throw new IllegalStateException("HintsService is shut down and can't accept new hints");
    // we have to make sure that the HintsStore instances get properly initialized - otherwise dispatch will not trigger
    catalog.maybeLoadStores(hostIds);
    // only buffer the hint if it hasn't already expired
    if (hint.isLive())
        bufferPool.write(hostIds, hint);
    // NOTE(review): the counter is bumped even when the hint was dead
    // (!isLive()) and never buffered — confirm this is intended.
    StorageMetrics.totalHints.inc(size(hostIds));
}
示例6: addNewSSTablesSize
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Accounts for sstables that just became live: bumps the global storage
 * load and this column family's live/total disk-space metrics by each
 * sstable's on-disk size, and registers this tracker with the sstable.
 *
 * @param newSSTables sstables to start tracking for this column family
 */
private void addNewSSTablesSize(Iterable<SSTableReader> newSSTables)
{
    for (SSTableReader sstable : newSSTables)
    {
        // Parameterized logging: SLF4J only renders the message when debug
        // is enabled, so the explicit isDebugEnabled() guard and the eager
        // String.format are unnecessary.
        logger.debug("adding {} to list of files tracked for {}.{}",
                     sstable.descriptor, cfstore.table.name, cfstore.getColumnFamilyName());
        long size = sstable.bytesOnDisk();
        StorageMetrics.load.inc(size);
        cfstore.metric.liveDiskSpaceUsed.inc(size);
        cfstore.metric.totalDiskSpaceUsed.inc(size);
        sstable.setTrackedBy(this);
    }
}
示例7: run
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Selector service loop: keeps calling select() until {@code stopped} is
 * set. Any uncaught throwable (other than OutOfMemoryError, which is
 * rethrown) is counted in StorageMetrics.exceptions and logged; the
 * selector is always closed on exit.
 */
public void run()
{
    try
    {
        while (!stopped)
        {
            select();
        }
    }
    catch (Throwable t)
    {
        // Never swallow OOM; Throwables.propagate rethrows Errors as-is,
        // so control does not reach the lines below in that case.
        if (t instanceof OutOfMemoryError)
            Throwables.propagate(t);
        StorageMetrics.exceptions.inc();
        LOGGER.error("Uncaught Exception: ", t);
    }
    finally
    {
        try
        {
            selector.close(); // CASSANDRA-3867
        }
        catch (IOException e)
        {
            // ignore this exception: nothing useful can be done if the
            // selector fails to close while we are already shutting down.
        }
    }
}
示例8: getExceptionCount
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * @return the number of uncaught exceptions recorded in
 *         StorageMetrics.exceptions, narrowed to an int for the
 *         management interface
 */
public int getExceptionCount()
{
    long exceptions = StorageMetrics.exceptions.count();
    return (int) exceptions;
}
示例9: testNumberOfFilesAndSizes
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Verifies that incrementally rewriting one large sstable into several
 * smaller ones keeps disk-space accounting consistent: the global
 * StorageMetrics.load and the per-CF liveDiskSpaceUsed /
 * totalDiskSpaceUsed metrics must track the bytes actually live on disk,
 * and no tmp/tmplink artifacts may remain once the rewrite finishes.
 */
@Test
public void testNumberOfFilesAndSizes() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    // Snapshot the global load before the rewrite so the delta can be checked.
    long startStorageMetricsLoad = StorageMetrics.load.count();
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    SSTableRewriter.overrideOpenInterval(10000000);
    SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0))
    {
        while(scanner.hasNext())
        {
            rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
            // Roll to a fresh writer roughly every 25MB of output.
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
                files++;
                assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
                assertEquals(s.bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.count());
                assertEquals(s.bytesOnDisk(), cfs.metric.totalDiskSpaceUsed.count());
            }
        }
    }
    List<SSTableReader> sstables = rewriter.finish();
    cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
    long sum = 0;
    for (SSTableReader x : cfs.getSSTables())
        sum += x.bytesOnDisk();
    assertEquals(sum, cfs.metric.liveDiskSpaceUsed.count());
    // Global load: original sstable removed, replacement sstables added.
    assertEquals(startStorageMetricsLoad - s.bytesOnDisk() + sum, StorageMetrics.load.count());
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getSSTables().size());
    Thread.sleep(1000);
    // tmplink and tmp files should be gone:
    assertEquals(sum, cfs.metric.totalDiskSpaceUsed.count());
    assertFileCounts(s.descriptor.directory.list(), 0, 0);
    validateCFS(cfs);
}
示例10: submitHint
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Submits a hint task to the MUTATION stage, first recording it in the
 * global and per-endpoint hints-in-progress counters (the matching
 * decrements happen in the runnable's finally block once it completes).
 *
 * @param runnable the hint task; its {@code target} identifies the endpoint
 * @return a future tracking completion of the hint delivery
 */
private static Future<Void> submitHint(HintRunnable runnable)
{
    // Increment before submitting so the in-progress count never under-reports.
    StorageMetrics.totalHintsInProgress.inc();
    getHintsInProgressFor(runnable.target).incrementAndGet();
    return (Future<Void>) StageManager.getStage(Stage.MUTATION).submit(runnable);
}
示例11: getTotalHints
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * @return the current value of the StorageMetrics.totalHints counter
 */
public long getTotalHints()
{
    long totalHints = StorageMetrics.totalHints.count();
    return totalHints;
}
示例12: getHintsInProgress
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * @return the number of hints currently being delivered, narrowed to an
 *         int for the management interface
 */
public int getHintsInProgress()
{
    long inProgress = StorageMetrics.totalHintsInProgress.count();
    return (int) inProgress;
}
示例13: getLoadString
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * @return a human-readable rendering (e.g. with size units) of this
 *         node's storage load as tracked by StorageMetrics.load
 */
public String getLoadString()
{
    long loadBytes = StorageMetrics.load.getCount();
    return FileUtils.stringifyFileSize(loadBytes);
}
示例14: getExceptionCount
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * @return the number of uncaught exceptions recorded in
 *         StorageMetrics.exceptions, narrowed to an int for the
 *         management interface
 */
public int getExceptionCount()
{
    long exceptions = StorageMetrics.exceptions.getCount();
    return (int) exceptions;
}
示例15: testNumberOfFilesAndSizes
import org.apache.cassandra.metrics.StorageMetrics; //导入依赖的package包/类
/**
 * Verifies that incrementally rewriting one large sstable into several
 * smaller ones via a LifecycleTransaction keeps disk-space accounting
 * consistent: the global StorageMetrics.load and the per-CF
 * liveDiskSpaceUsed / totalDiskSpaceUsed metrics must track the bytes
 * actually live on disk, and no tmp/tmplink artifacts may remain once
 * pending deletions have run.
 */
@Test
public void testNumberOfFilesAndSizes() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    // Snapshot the global load and source size before the rewrite.
    long startStorageMetricsLoad = StorageMetrics.load.getCount();
    long sBytesOnDisk = s.bytesOnDisk();
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, false, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while(ci.hasNext())
        {
            rewriter.append(ci.next());
            // Roll to a fresh writer roughly every 25MB of output.
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                assertEquals(cfs.getLiveSSTables().size(), files); // we have one original file plus the ones we have switched out.
                assertEquals(s.bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());
                assertEquals(s.bytesOnDisk(), cfs.metric.totalDiskSpaceUsed.getCount());
            }
        }
        sstables = rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    long sum = 0;
    for (SSTableReader x : cfs.getLiveSSTables())
        sum += x.bytesOnDisk();
    assertEquals(sum, cfs.metric.liveDiskSpaceUsed.getCount());
    // Global load: original sstable removed, replacement sstables added.
    assertEquals(startStorageMetricsLoad - sBytesOnDisk + sum, StorageMetrics.load.getCount());
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    // tmplink and tmp files should be gone:
    assertEquals(sum, cfs.metric.totalDiskSpaceUsed.getCount());
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}