This article collects typical usage examples of the Java method org.apache.cassandra.config.DatabaseDescriptor.getBatchlogReplayThrottleInKB. If you are wondering what exactly DatabaseDescriptor.getBatchlogReplayThrottleInKB does, how to call it, or where to find usage examples, the curated code samples below should help. You can also read more about the enclosing class, org.apache.cassandra.config.DatabaseDescriptor.
Two code examples of DatabaseDescriptor.getBatchlogReplayThrottleInKB are shown below.
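Both examples follow the same pattern: read the configured throttle (in KB/s), share it across the nodes in the ring, and turn it into a Guava RateLimiter, with a value of 0 meaning "unthrottled". The following is a minimal sketch of just that pattern, assuming it runs inside a Cassandra node where DatabaseDescriptor has already been initialized; the class name ReplayThrottleSketch and the helper buildReplayRateLimiter are illustrative only and are not part of the Cassandra codebase.

import com.google.common.util.concurrent.RateLimiter;
import org.apache.cassandra.config.DatabaseDescriptor;

public class ReplayThrottleSketch
{
    /**
     * Turns the configured batchlog replay throttle (KB/s) into a bytes-per-second
     * RateLimiter. Illustrative helper only; it assumes DatabaseDescriptor has been
     * initialized (i.e. this runs inside a Cassandra node, not a standalone app).
     */
    static RateLimiter buildReplayRateLimiter(int endpointCount)
    {
        // Split the cluster-wide KB/s budget across the nodes in the ring,
        // mirroring what both examples below do with getAllEndpoints().size().
        int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointCount;

        // 0 in cassandra.yaml means "disabled", which maps to an effectively
        // unlimited rate; otherwise convert KB/s into bytes/s for the RateLimiter.
        return RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
    }
}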
Example 1: replayAllFailedBatches
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
private void replayAllFailedBatches() throws ExecutionException, InterruptedException
{
    logger.debug("Started replayAllFailedBatches");

    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

    UntypedResultSet page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s LIMIT %d",
                                                          Keyspace.SYSTEM_KS,
                                                          SystemKeyspace.BATCHLOG_CF,
                                                          PAGE_SIZE));
    while (!page.isEmpty())
    {
        UUID id = processBatchlogPage(page, rateLimiter);

        if (page.size() < PAGE_SIZE)
            break; // we've exhausted the batchlog, next query would be empty.

        page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s WHERE token(id) > token(?) LIMIT %d",
                                             Keyspace.SYSTEM_KS,
                                             SystemKeyspace.BATCHLOG_CF,
                                             PAGE_SIZE),
                               id);
    }

    cleanup();

    logger.debug("Finished replayAllFailedBatches");
}
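In this version the throttle is divided by the total number of endpoints in the ring, so the aggregate replay traffic across the cluster stays near the configured KB/s (the same scaling used for hinted handoff, per CASSANDRA-5272), and a value of 0 disables throttling by mapping to Double.MAX_VALUE. Paging through the batchlog is done by hand, using LIMIT plus a token(id) > token(?) predicate to resume from the last processed id.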
Example 2: replayFailedBatches
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
private void replayFailedBatches()
{
    logger.trace("Started replayFailedBatches");

    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    if (endpointsCount <= 0)
    {
        logger.trace("Replay cancelled as there are no peers in the ring.");
        return;
    }
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount;
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

    UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout());
    ColumnFamilyStore store = Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
    int pageSize = calculatePageSize(store);

    // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is
    // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify
    // token(id) > token(lastReplayedUuid) as part of the query.
    String query = String.format("SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)",
                                 SystemKeyspace.NAME,
                                 SystemKeyspace.BATCHES);
    UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid);
    processBatchlogEntries(batches, pageSize, rateLimiter);

    lastReplayedUuid = limitUuid;

    logger.trace("Finished replayFailedBatches");
}
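This newer variant guards against an empty ring before dividing the throttle, relies on executeInternalWithPaging instead of hand-rolled LIMIT paging, and only scans batches older than the batchlog timeout by bounding the token range between lastReplayedUuid and a time-based limitUuid.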