本文整理汇总了Java中org.apache.cassandra.utils.FBUtilities.waitOnFutures方法的典型用法代码示例。如果您正苦于以下问题:Java FBUtilities.waitOnFutures方法的具体用法?Java FBUtilities.waitOnFutures怎么用?Java FBUtilities.waitOnFutures使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 org.apache.cassandra.utils.FBUtilities 的用法示例。
在下文中一共展示了FBUtilities.waitOnFutures方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: finishParentSession
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Tears down a parent repair session, optionally triggering anticompaction on
 * the session's neighbors and locally before the session is forgotten.
 * The session entry is removed even if anticompaction fails.
 */
public synchronized void finishParentSession(UUID parentSession, Set<InetAddress> neighbors, boolean doAntiCompaction) throws InterruptedException, ExecutionException, IOException
{
    try
    {
        if (!doAntiCompaction)
            return;
        // Tell every neighbor to anticompact the data of this parent session.
        for (InetAddress neighbor : neighbors)
        {
            MessageOut<RepairMessage> request = new AnticompactionRequest(parentSession).createMessage();
            MessagingService.instance().sendOneWay(request, neighbor);
        }
        // Run local anticompaction and block until it has completed.
        FBUtilities.waitOnFutures(doAntiCompaction(parentSession));
    }
    finally
    {
        // Drop the session unconditionally so it cannot leak.
        parentRepairSessions.remove(parentSession);
    }
}
示例2: forceCompactions
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Drives the compaction manager until it has no pending or active work,
 * then performs a final maximal compaction if more than one sstable remains.
 */
private void forceCompactions(ColumnFamilyStore cfs) throws ExecutionException, InterruptedException
{
    // Thresholds low enough that even a handful of sstables triggers a round.
    cfs.setCompactionThresholds(2, 4);
    // Keep submitting batches of parallel background compactions until the manager is idle.
    do
    {
        List<Future<?>> batch = new ArrayList<Future<?>>();
        for (int attempt = 0; attempt < 10; attempt++)
            batch.addAll(CompactionManager.instance.submitBackground(cfs));
        // Completing compactions may schedule follow-up work on their own;
        // all we can do here is wait for the batch we just submitted.
        FBUtilities.waitOnFutures(batch);
    } while (CompactionManager.instance.getPendingTasks() > 0 || CompactionManager.instance.getActiveCompactions() > 0);
    // Collapse any leftovers into a single sstable.
    if (cfs.getSSTables().size() > 1)
    {
        CompactionManager.instance.performMaximal(cfs);
    }
}
示例3: close
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Waits for outstanding read-repair mutations, converting a local timeout into
 * a ReadTimeoutException for the caller. Reaching the catch block means every
 * data/digest response arrived but the repair writes did not finish in time.
 */
public void close()
{
    try
    {
        FBUtilities.waitOnFutures(repairResults, DatabaseDescriptor.getWriteRpcTimeout());
    }
    catch (TimeoutException ex)
    {
        // All responses were received; only the repair itself timed out.
        int blockFor = consistency.blockFor(keyspace);
        if (!Tracing.isTracing())
            logger.debug("Timeout while read-repairing after receiving all {} data and digest responses", blockFor);
        else
            Tracing.trace("Timed out while read-repairing after receiving all {} data and digest responses", blockFor);
        throw new ReadTimeoutException(consistency, blockFor - 1, blockFor, true);
    }
}
示例4: flushIndexesBlocking
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Performs a blocking flush of the given indexes. CFS-backed indexes are
 * flushed asynchronously and waited on; the remaining indexes are flushed
 * synchronously while those flushes run.
 */
public void flushIndexesBlocking(Set<Index> indexes)
{
    if (indexes.isEmpty())
        return;

    List<Future<?>> flushFutures = new ArrayList<>();
    List<Index> inlineFlushable = new ArrayList<>();
    // Partition under the tracker lock so the set of sstables/memtables
    // each flush covers is consistent.
    synchronized (baseCfs.getTracker())
    {
        for (Index index : indexes)
        {
            if (index.getBackingTable().isPresent())
                flushFutures.add(index.getBackingTable().get().forceFlush());
            else
                inlineFlushable.add(index);
        }
    }
    // Flush the non-CFS-backed indexes while the async flushes proceed.
    executeAllBlocking(inlineFlushable.stream(), Index::getBlockingFlushTask);
    FBUtilities.waitOnFutures(flushFutures);
}
示例5: forceCompactions
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Repeatedly submits background compactions until the compaction manager is
 * fully idle, then runs a maximal compaction if multiple live sstables remain.
 */
private void forceCompactions(ColumnFamilyStore cfs)
{
    // Re-enable compaction with thresholds low enough to force several rounds.
    cfs.setCompactionThresholds(2, 4);
    do
    {
        List<Future<?>> submitted = new ArrayList<Future<?>>();
        for (int i = 0; i < 10; ++i)
            submitted.addAll(CompactionManager.instance.submitBackground(cfs));
        // Each finishing compaction may kick off more in the background;
        // we only wait for the ones submitted above.
        FBUtilities.waitOnFutures(submitted);
    } while (CompactionManager.instance.getPendingTasks() > 0 || CompactionManager.instance.getActiveCompactions() > 0);
    if (cfs.getLiveSSTables().size() > 1)
    {
        // Non-split maximal compaction over whatever is left.
        CompactionManager.instance.performMaximal(cfs, false);
    }
}
示例6: saveCaches
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Submits a save task for the key, row and counter caches and blocks until
 * all three writes have completed.
 */
public void saveCaches() throws ExecutionException, InterruptedException
{
    logger.debug("submitting cache saves");
    // One write task per cache; wait for all of them before declaring success.
    List<Future<?>> pending = new ArrayList<>(3);
    pending.add(keyCache.submitWrite(DatabaseDescriptor.getKeyCacheKeysToSave()));
    pending.add(rowCache.submitWrite(DatabaseDescriptor.getRowCacheKeysToSave()));
    pending.add(counterCache.submitWrite(DatabaseDescriptor.getCounterCacheKeysToSave()));
    FBUtilities.waitOnFutures(pending);
    logger.debug("cache saves completed");
}
示例7: flushSSTables
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Blocks until every given column family store has been flushed to sstables.
 * Callers pass either the matching column families of a keyspace or all of
 * them when no filter applies.
 */
private void flushSSTables(Iterable<ColumnFamilyStore> stores)
{
    List<Future<?>> pendingFlushes = new ArrayList<>();
    for (ColumnFamilyStore store : stores)
        pendingFlushes.add(store.forceFlush());
    FBUtilities.waitOnFutures(pendingFlushes);
}
示例8: saveCaches
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Submits a save task for the key and row caches and blocks until both
 * writes have finished.
 */
public void saveCaches() throws ExecutionException, InterruptedException
{
    logger.debug("submitting cache saves");
    List<Future<?>> writes = new ArrayList<>(2);
    writes.add(keyCache.submitWrite(DatabaseDescriptor.getKeyCacheKeysToSave()));
    writes.add(rowCache.submitWrite(DatabaseDescriptor.getRowCacheKeysToSave()));
    FBUtilities.waitOnFutures(writes);
    logger.debug("cache saves completed");
}
示例9: blockForWrites
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Blocks until all recovery mutations have been applied, then flushes every
 * recovered keyspace and waits for those flushes as well.
 *
 * @return the number of flush futures waited on (the {@code futures} list is
 *         reused for the flush phase, so its size reflects the flush count)
 */
public int blockForWrites() {
    // Drain outstanding recovery mutations first.
    FBUtilities.waitOnFutures(futures);
    logger.debug("Finished waiting on mutations from recovery");
    // Reuse the same list to collect flush futures for each recovered keyspace.
    futures.clear();
    for (Keyspace recovered : keyspacesRecovered)
        futures.addAll(recovered.flush());
    FBUtilities.waitOnFutures(futures);
    return futures.size();
}
示例10: executeAllBlocking
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Applies {@code function} to each index to obtain a task, submits every
 * non-null task to the blocking executor, and waits for all of them.
 */
private static void executeAllBlocking(Stream<Index> indexers, Function<Index, Callable<?>> function)
{
    List<Future<?>> submitted = new ArrayList<>();
    indexers.forEach(indexer ->
    {
        Callable<?> task = function.apply(indexer);
        // A null task means this index has nothing to do for this operation.
        if (task != null)
            submitted.add(blockingExecutor.submit(task));
    });
    FBUtilities.waitOnFutures(submitted);
}
示例11: blockForWrites
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Waits for all replayed commit-log mutations to finish on the mutation
 * stage, then flushes every recovered keyspace (plus the batchlog table when
 * the system keyspace was not among them, since MV updates land there).
 *
 * @return the number of mutations replayed
 */
public int blockForWrites()
{
    // Surface mutations that were skipped because their CF no longer exists.
    for (Map.Entry<UUID, AtomicInteger> skipped : invalidMutations.entrySet())
        logger.warn(String.format("Skipped %d mutations from unknown (probably removed) CF with id %s", skipped.getValue().intValue(), skipped.getKey()));

    // Block until every replayed write has been applied.
    FBUtilities.waitOnFutures(futures);
    logger.trace("Finished waiting on mutations from recovery");

    // Reuse the futures list for the flush phase.
    futures.clear();
    boolean sawSystemKeyspace = false;
    for (Keyspace keyspace : keyspacesRecovered)
    {
        sawSystemKeyspace |= keyspace.getName().equals(SystemKeyspace.NAME);
        futures.addAll(keyspace.flush());
    }
    // The system keyspace flush already covers the batchlog; otherwise flush it explicitly.
    if (!sawSystemKeyspace)
        futures.add(Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceFlush());
    FBUtilities.waitOnFutures(futures);
    return replayedCount.get();
}
示例12: testDropDuringCompaction
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Drops a table while a throttled maximal compaction is running and verifies
 * the compaction either completes or fails with CompactionInterruptedException.
 */
@Test
public void testDropDuringCompaction() throws Throwable
{
    CompactionManager.instance.disableAutoCompaction();
    // Table with a 99% crc_check_chance so the checksum path is exercised.
    createTable("CREATE TABLE %s (p text, c text, v text, s text static, PRIMARY KEY (p, c)) WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance' : 0.99}");
    ColumnFamilyStore cfs = Keyspace.open(CQLTester.KEYSPACE).getColumnFamilyStore(currentTable());

    // Build up 100 small sstables, one flush per round.
    for (int round = 0; round < 100; round++)
    {
        execute("INSERT INTO %s(p, c, v, s) values (?, ?, ?, ?)", "p1", "k1", "v1", "sv1");
        execute("INSERT INTO %s(p, c, v) values (?, ?, ?)", "p1", "k2", "v2");
        execute("INSERT INTO %s(p, s) values (?, ?)", "p2", "sv2");
        cfs.forceBlockingFlush();
    }

    // Throttle compaction hard so the DROP lands while it is still in flight.
    DatabaseDescriptor.setCompactionThroughputMbPerSec(1);
    List<Future<?>> compactions = CompactionManager.instance.submitMaximal(cfs, CompactionManager.getDefaultGcBefore(cfs, FBUtilities.nowInSeconds()), false);
    execute("DROP TABLE %s");

    try
    {
        FBUtilities.waitOnFutures(compactions);
    }
    catch (Throwable t)
    {
        // The only acceptable failure is the compaction being interrupted by the drop.
        boolean interruptedByDrop = t.getCause() instanceof ExecutionException
                                    && t.getCause().getCause() instanceof CompactionInterruptedException;
        if (!interruptedByDrop)
            throw t;
    }
}
示例13: testRowTombstoneObservedBeforePurging
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Verifies row-tombstone purging semantics under maximal compaction: a
 * tombstone newer than gcBefore survives compaction (shadowing the data),
 * while a tombstone older than gcBefore is purged together with the data.
 * The flush/assert sequence is order-critical: each flush produces exactly
 * the sstable counts asserted below.
 */
@Test
public void testRowTombstoneObservedBeforePurging() throws InterruptedException, ExecutionException
{
    String keyspace = "cql_keyspace";
    String table = "table1";
    ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
    cfs.disableAutoCompaction();
    // write a row out to one sstable
    executeInternal(String.format("INSERT INTO %s.%s (k, v1, v2) VALUES (%d, '%s', %d)",
                                  keyspace, table, 1, "foo", 1));
    cfs.forceBlockingFlush();
    UntypedResultSet result = executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(1, result.size());
    // write a row tombstone out to a second sstable
    executeInternal(String.format("DELETE FROM %s.%s WHERE k = %d", keyspace, table, 1));
    cfs.forceBlockingFlush();
    // basic check that the row is considered deleted
    assertEquals(2, cfs.getSSTables().size());
    result = executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(0, result.size());
    // compact the two sstables with a gcBefore that does *not* allow the row tombstone to be purged
    // (gcBefore = now - 10000s, so the just-written tombstone is newer than it and must be kept)
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, (int) (System.currentTimeMillis() / 1000) - 10000));
    // the data should be gone, but the tombstone should still exist
    assertEquals(1, cfs.getSSTables().size());
    result = executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(0, result.size());
    // write a row out to one sstable
    executeInternal(String.format("INSERT INTO %s.%s (k, v1, v2) VALUES (%d, '%s', %d)",
                                  keyspace, table, 1, "foo", 1));
    cfs.forceBlockingFlush();
    assertEquals(2, cfs.getSSTables().size());
    result = executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(1, result.size());
    // write a row tombstone out to a different sstable
    executeInternal(String.format("DELETE FROM %s.%s WHERE k = %d", keyspace, table, 1));
    cfs.forceBlockingFlush();
    // compact the two sstables with a gcBefore that *does* allow the row tombstone to be purged
    // (gcBefore = now + 10000s, so the tombstone is older than it and gets dropped)
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, (int) (System.currentTimeMillis() / 1000) + 10000));
    // both the data and the tombstone should be gone this time
    assertEquals(0, cfs.getSSTables().size());
    result = executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(0, result.size());
}
示例14: testUncheckedTombstoneSizeTieredCompaction
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Verifies the STCS "unchecked_tombstone_compaction" option: with it disabled,
 * two sstables whose token ranges overlap are NOT tombstone-compacted even
 * after their data expires; with it enabled, both shrink via single-sstable
 * tombstone compaction. Timing-sensitive: relies on a 3s TTL plus sleeps, so
 * the statement order must not change.
 */
@Test
public void testUncheckedTombstoneSizeTieredCompaction() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(STANDARD1);
    store.clearUnsafe();
    store.metadata.gcGraceSeconds(1);
    store.metadata.compactionStrategyOptions.put("tombstone_compaction_interval", "1");
    store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "false");
    store.reload();
    store.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getName());
    // disable compaction while flushing
    store.disableAutoCompaction();
    //Populate sstable1 with with keys [0..9]
    populate(KEYSPACE1, STANDARD1, 0, 9, 3); //ttl=3s
    store.forceBlockingFlush();
    //Populate sstable2 with with keys [10..19] (keys do not overlap with SSTable1)
    long timestamp2 = populate(KEYSPACE1, STANDARD1, 10, 19, 3); //ttl=3s
    store.forceBlockingFlush();
    assertEquals(2, store.getSSTables().size());
    Iterator<SSTableReader> it = store.getSSTables().iterator();
    long originalSize1 = it.next().uncompressedLength();
    long originalSize2 = it.next().uncompressedLength();
    // wait enough to force single compaction
    // (sleep > ttl + gc_grace so all cells are expired and purgeable)
    TimeUnit.SECONDS.sleep(5);
    // enable compaction, submit background and wait for it to complete
    store.enableAutoCompaction();
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    while (CompactionManager.instance.getPendingTasks() > 0 || CompactionManager.instance.getActiveCompactions() > 0)
        TimeUnit.SECONDS.sleep(1);
    // even though both sstables were candidate for tombstone compaction
    // it was not executed because they have an overlapping token range
    assertEquals(2, store.getSSTables().size());
    it = store.getSSTables().iterator();
    long newSize1 = it.next().uncompressedLength();
    long newSize2 = it.next().uncompressedLength();
    assertEquals("candidate sstable should not be tombstone-compacted because its key range overlap with other sstable",
                 originalSize1, newSize1);
    assertEquals("candidate sstable should not be tombstone-compacted because its key range overlap with other sstable",
                 originalSize2, newSize2);
    // now let's enable the magic property
    // (skips the overlap check, allowing single-sstable tombstone compaction)
    store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "true");
    store.reload();
    //submit background task again and wait for it to complete
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    while (CompactionManager.instance.getPendingTasks() > 0 || CompactionManager.instance.getActiveCompactions() > 0)
        TimeUnit.SECONDS.sleep(1);
    //we still have 2 sstables, since they were not compacted against each other
    assertEquals(2, store.getSSTables().size());
    it = store.getSSTables().iterator();
    newSize1 = it.next().uncompressedLength();
    newSize2 = it.next().uncompressedLength();
    assertTrue("should be less than " + originalSize1 + ", but was " + newSize1, newSize1 < originalSize1);
    assertTrue("should be less than " + originalSize2 + ", but was " + newSize2, newSize2 < originalSize2);
    // make sure max timestamp of compacted sstables is recorded properly after compaction.
    assertMaxTimestamp(store, timestamp2);
}
示例15: testRowTombstoneObservedBeforePurging
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Newer-API variant of the tombstone-purging test (QueryProcessor +
 * getLiveSSTables + submitMaximal with splitOutput=false): a row tombstone
 * newer than gcBefore survives maximal compaction, one older than gcBefore
 * is purged along with the data. The flush/assert sequence is order-critical.
 */
@Test
public void testRowTombstoneObservedBeforePurging()
{
    String keyspace = "cql_keyspace";
    String table = "table1";
    ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
    cfs.disableAutoCompaction();
    // write a row out to one sstable
    QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v1, v2) VALUES (%d, '%s', %d)",
                                                 keyspace, table, 1, "foo", 1));
    cfs.forceBlockingFlush();
    UntypedResultSet result = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(1, result.size());
    // write a row tombstone out to a second sstable
    QueryProcessor.executeInternal(String.format("DELETE FROM %s.%s WHERE k = %d", keyspace, table, 1));
    cfs.forceBlockingFlush();
    // basic check that the row is considered deleted
    assertEquals(2, cfs.getLiveSSTables().size());
    result = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(0, result.size());
    // compact the two sstables with a gcBefore that does *not* allow the row tombstone to be purged
    // (gcBefore = now - 10000s, so the fresh tombstone must be retained)
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, (int) (System.currentTimeMillis() / 1000) - 10000, false));
    // the data should be gone, but the tombstone should still exist
    assertEquals(1, cfs.getLiveSSTables().size());
    result = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(0, result.size());
    // write a row out to one sstable
    QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v1, v2) VALUES (%d, '%s', %d)",
                                                 keyspace, table, 1, "foo", 1));
    cfs.forceBlockingFlush();
    assertEquals(2, cfs.getLiveSSTables().size());
    result = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(1, result.size());
    // write a row tombstone out to a different sstable
    QueryProcessor.executeInternal(String.format("DELETE FROM %s.%s WHERE k = %d", keyspace, table, 1));
    cfs.forceBlockingFlush();
    // compact the two sstables with a gcBefore that *does* allow the row tombstone to be purged
    // (gcBefore = now + 10000s, so the tombstone is droppable)
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, (int) (System.currentTimeMillis() / 1000) + 10000, false));
    // both the data and the tombstone should be gone this time
    assertEquals(0, cfs.getLiveSSTables().size());
    result = QueryProcessor.executeInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
    assertEquals(0, result.size());
}