This article collects typical usage examples of the Java method org.apache.cassandra.db.ColumnFamilyStore.truncateBlocking. If you are wondering what ColumnFamilyStore.truncateBlocking does, how to call it, or what real usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.cassandra.db.ColumnFamilyStore.
The following shows 15 code examples of the ColumnFamilyStore.truncateBlocking method, sorted by popularity by default.
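Before the individual examples, here is a minimal sketch of the pattern most of them share: truncateBlocking() synchronously removes all existing data from the table, so tests typically call it first (or in an @After method) to start from a clean state, then disable auto-compaction, write some rows, and flush. The sketch only uses calls that appear in the examples below; the method and table names (prepareCleanStore, "ks", "tbl") are placeholders, and the write API you would use in place of the elided mutation code differs between Cassandra versions (older versions use Mutation, newer ones RowUpdateBuilder).

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;

// Minimal sketch with placeholder names: wipe a table before a test, then write and flush.
private ColumnFamilyStore prepareCleanStore()
{
    Keyspace keyspace = Keyspace.open("ks");                      // "ks" is a placeholder keyspace name
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("tbl"); // "tbl" is a placeholder table name

    cfs.truncateBlocking();      // blocks until all existing data in the table has been discarded
    cfs.disableAutoCompaction(); // keep background compaction from changing the sstable layout mid-test

    // ... apply mutations here (Mutation or RowUpdateBuilder, depending on the Cassandra version) ...

    cfs.forceBlockingFlush();    // flush the memtable so the written data lands in sstables
    return cfs;
}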
Example 1: prepareColumnFamilyStore
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
private ColumnFamilyStore prepareColumnFamilyStore()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.truncateBlocking();
    store.disableAutoCompaction();

    long timestamp = System.currentTimeMillis();
    for (int i = 0; i < 10; i++)
    {
        DecoratedKey key = Util.dk(Integer.toString(i));
        Mutation rm = new Mutation(KEYSPACE1, key.getKey());
        for (int j = 0; j < 10; j++)
            rm.add("Standard1", Util.cellname(Integer.toString(j)),
                   ByteBufferUtil.EMPTY_BYTE_BUFFER,
                   timestamp,
                   0);
        rm.apply();
    }
    store.forceBlockingFlush();
    return store;
}
Example 2: createSSTable
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
private void createSSTable(ColumnFamilyStore cfs, int numPartitions) throws IOException
{
    cfs.truncateBlocking();

    String schema = "CREATE TABLE \"%s\".\"%s\" (key ascii, name ascii, val ascii, val1 ascii, PRIMARY KEY (key, name))";
    String query = "INSERT INTO \"%s\".\"%s\" (key, name, val) VALUES (?, ?, ?)";

    try (CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                                   .inDirectory(cfs.getDirectories().getDirectoryForNewSSTables())
                                                   .forTable(String.format(schema, cfs.keyspace.getName(), cfs.name))
                                                   .using(String.format(query, cfs.keyspace.getName(), cfs.name))
                                                   .build())
    {
        for (int j = 0; j < numPartitions; j++)
            writer.addRow(String.format("key%d", j), "col1", "0");
    }
    cfs.loadNewSSTables();
}
Example 3: truncateCF
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@After
public void truncateCF()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.truncateBlocking();
}
Example 4: testFilterOldSSTables
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void testFilterOldSSTables()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);

    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++)
    {
        DecoratedKey key = Util.dk(String.valueOf(r));
        Mutation rm = new Mutation(KEYSPACE1, key.getKey());
        rm.add(CF_STANDARD1, Util.cellname("column"), value, r);
        rm.apply();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();

    Iterable<SSTableReader> filtered;
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getSSTables());

    filtered = filterOldSSTables(sstrs, 0, 2);
    assertEquals("when maxSSTableAge is zero, no sstables should be filtered", sstrs.size(), Iterables.size(filtered));

    filtered = filterOldSSTables(sstrs, 1, 2);
    assertEquals("only the newest 2 sstables should remain", 2, Iterables.size(filtered));

    filtered = filterOldSSTables(sstrs, 1, 3);
    assertEquals("only the newest sstable should remain", 1, Iterables.size(filtered));

    filtered = filterOldSSTables(sstrs, 1, 4);
    assertEquals("no sstables should remain when all are too old", 0, Iterables.size(filtered));
}
Example 5: executeInternal
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
public ResultMessage executeInternal(QueryState state, QueryOptions options)
{
    try
    {
        ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(columnFamily());
        cfs.truncateBlocking();
    }
    catch (Exception e)
    {
        throw new TruncateException(e);
    }
    return null;
}
Example 6: prepareColumnFamilyStore
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
private ColumnFamilyStore prepareColumnFamilyStore()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE5);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.truncateBlocking();
    store.disableAutoCompaction();
    createSSTables(store, 10);
    return store;
}
Example 7: testGetFullyExpiredSSTables
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void testGetFullyExpiredSSTables()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF2);
    cfs.truncateBlocking();

    DecoratedKey key = Util.dk("k1");

    long timestamp1 = FBUtilities.timestampMicros(); // latest timestamp
    long timestamp2 = timestamp1 - 5;
    long timestamp3 = timestamp2 - 5; // oldest timestamp

    // create an sstable with a tombstone that should be fully expired (there are no older timestamps it could shadow)
    applyDeleteMutation(cfs.metadata, key, timestamp2);
    cfs.forceBlockingFlush();

    // first sstable with the tombstone is compacting
    Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());

    // create another sstable with a more recent timestamp
    applyMutation(cfs.metadata, key, timestamp1);
    cfs.forceBlockingFlush();

    // second sstable is overlapping
    Set<SSTableReader> overlapping = Sets.difference(Sets.newHashSet(cfs.getLiveSSTables()), compacting);

    // the first sstable should be expired because the overlapping sstable is newer and the gc period is later
    int gcBefore = (int) (System.currentTimeMillis() / 1000) + 5;
    Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
    assertNotNull(expired);
    assertEquals(1, expired.size());
    assertEquals(compacting.iterator().next(), expired.iterator().next());

    // however, if we add an older mutation to the memtable then the sstable should not be expired
    applyMutation(cfs.metadata, key, timestamp3);
    expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
    assertNotNull(expired);
    assertEquals(0, expired.size());
}
Example 8: testPrepBucket
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void testPrepBucket()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);

    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++)
    {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata, r, key.getKey())
            .clustering("column")
            .add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();

    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());

    List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());

    newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", newBucket.isEmpty());

    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());

    cfs.truncateBlocking();
}
Example 9: testFilterOldSSTables
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void testFilterOldSSTables()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);

    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++)
    {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata, r, key.getKey())
            .clustering("column")
            .add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();

    Iterable<SSTableReader> filtered;
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());

    filtered = filterOldSSTables(sstrs, 0, 2);
    assertEquals("when maxSSTableAge is zero, no sstables should be filtered", sstrs.size(), Iterables.size(filtered));

    filtered = filterOldSSTables(sstrs, 1, 2);
    assertEquals("only the newest 2 sstables should remain", 2, Iterables.size(filtered));

    filtered = filterOldSSTables(sstrs, 1, 3);
    assertEquals("only the newest sstable should remain", 1, Iterables.size(filtered));

    filtered = filterOldSSTables(sstrs, 1, 4);
    assertEquals("no sstables should remain when all are too old", 0, Iterables.size(filtered));

    cfs.truncateBlocking();
}
Example 10: createSSTables
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
private void createSSTables(String ksname, String cfname, int numSSTables, int numPartition)
{
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();

    ArrayList<Future> futures = new ArrayList<>(numSSTables);
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    for (int sstable = 0; sstable < numSSTables; sstable++)
    {
        for (int p = 0; p < numPartition; p++)
        {
            String key = String.format("%3d", p);
            new RowUpdateBuilder(cfs.metadata, 0, key)
                .clustering("column")
                .add("val", value)
                .build()
                .applyUnsafe();
        }
        futures.add(cfs.forceFlush());
    }

    for (Future future : futures)
    {
        try
        {
            future.get();
        }
        catch (InterruptedException | ExecutionException e)
        {
            throw new RuntimeException(e);
        }
    }

    assertEquals(numSSTables, cfs.getLiveSSTables().size());
    validateData(cfs, numPartition);
}
Example 11: truncateCF
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@After
public void truncateCF()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.truncateBlocking();
    LifecycleTransaction.waitForDeletions();
}
Example 12: truncate
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
public static void truncate(ColumnFamilyStore cfs)
{
    cfs.truncateBlocking();
    LifecycleTransaction.waitForDeletions();
    Uninterruptibles.sleepUninterruptibly(10L, TimeUnit.MILLISECONDS);
    assertEquals(0, cfs.metric.liveDiskSpaceUsed.getCount());
    assertEquals(0, cfs.metric.totalDiskSpaceUsed.getCount());
    validateCFS(cfs);
}
Example 13: testPrepBucket
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void testPrepBucket()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);

    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++)
    {
        DecoratedKey key = Util.dk(String.valueOf(r));
        Mutation rm = new Mutation(KEYSPACE1, key.getKey());
        rm.add(CF_STANDARD1, Util.cellname("column"), value, r);
        rm.apply();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();

    List<SSTableReader> sstrs = new ArrayList<>(cfs.getSSTables());

    List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10);
    assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());

    newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10);
    assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", newBucket.isEmpty());

    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());

    // if we have more than the max threshold, the oldest should be dropped
    Collections.sort(sstrs, Collections.reverseOrder(new Comparator<SSTableReader>()
    {
        public int compare(SSTableReader o1, SSTableReader o2)
        {
            return Long.compare(o1.getMinTimestamp(), o2.getMinTimestamp());
        }
    }));

    List<SSTableReader> bucket = trimToThreshold(sstrs, 2);
    assertEquals("one bucket should have been dropped", 2, bucket.size());
    for (SSTableReader sstr : bucket)
        assertFalse("the oldest sstable should be dropped", sstr.getMinTimestamp() == 0);
}
Example 14: testMaxPurgeableTimestamp
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void testMaxPurgeableTimestamp()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF1);
    cfs.truncateBlocking();

    DecoratedKey key = Util.dk("k1");

    long timestamp1 = FBUtilities.timestampMicros(); // latest timestamp
    long timestamp2 = timestamp1 - 5;
    long timestamp3 = timestamp2 - 5; // oldest timestamp

    // add to first memtable
    applyMutation(cfs.metadata, key, timestamp1);

    // check max purgeable timestamp without any sstables
    try (CompactionController controller = new CompactionController(cfs, null, 0))
    {
        assertEquals(timestamp1, controller.maxPurgeableTimestamp(key)); // memtable only

        cfs.forceBlockingFlush();
        assertEquals(Long.MAX_VALUE, controller.maxPurgeableTimestamp(key)); // no memtables and no sstables
    }

    Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables()); // first sstable is compacting

    // create another sstable
    applyMutation(cfs.metadata, key, timestamp2);
    cfs.forceBlockingFlush();

    // check max purgeable timestamp when compacting the first sstable with and without a memtable
    try (CompactionController controller = new CompactionController(cfs, compacting, 0))
    {
        assertEquals(timestamp2, controller.maxPurgeableTimestamp(key)); // second sstable only

        applyMutation(cfs.metadata, key, timestamp3);
        assertEquals(timestamp3, controller.maxPurgeableTimestamp(key)); // second sstable and second memtable
    }

    // check max purgeable timestamp again without any sstables but with different insertion orders on the memtable
    cfs.forceBlockingFlush();

    // newest to oldest
    try (CompactionController controller = new CompactionController(cfs, null, 0))
    {
        applyMutation(cfs.metadata, key, timestamp1);
        applyMutation(cfs.metadata, key, timestamp2);
        applyMutation(cfs.metadata, key, timestamp3);
        assertEquals(timestamp3, controller.maxPurgeableTimestamp(key)); // memtable only
    }

    cfs.forceBlockingFlush();

    // oldest to newest
    try (CompactionController controller = new CompactionController(cfs, null, 0))
    {
        applyMutation(cfs.metadata, key, timestamp3);
        applyMutation(cfs.metadata, key, timestamp2);
        applyMutation(cfs.metadata, key, timestamp1);
        assertEquals(timestamp3, controller.maxPurgeableTimestamp(key)); // memtable only
    }
}
Example 15: testPrepBucket
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void testPrepBucket() throws Exception
{
    String ksname = KEYSPACE1;
    String cfname = "Standard1";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);

    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++)
    {
        String key = String.valueOf(r);
        new RowUpdateBuilder(cfs.metadata, 0, key)
            .clustering("column").add("val", value)
            .build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();

    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    Pair<List<SSTableReader>, Double> bucket;

    List<SSTableReader> interestingBucket = mostInterestingBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32);
    assertTrue("nothing should be returned when all buckets are below the min threshold", interestingBucket.isEmpty());

    sstrs.get(0).overrideReadMeter(new RestorableMeter(100.0, 100.0));
    sstrs.get(1).overrideReadMeter(new RestorableMeter(200.0, 200.0));
    sstrs.get(2).overrideReadMeter(new RestorableMeter(300.0, 300.0));

    long estimatedKeys = sstrs.get(0).estimatedKeys();

    // if we have more than the max threshold, the coldest should be dropped
    bucket = trimToThresholdWithHotness(sstrs, 2);
    assertEquals("one bucket should have been dropped", 2, bucket.left.size());
    double expectedBucketHotness = (200.0 + 300.0) / estimatedKeys;
    assertEquals(String.format("bucket hotness (%f) should be close to %f", bucket.right, expectedBucketHotness),
                 expectedBucketHotness, bucket.right, 1.0);
}