This article collects typical usage of the Java method org.apache.cassandra.db.ColumnFamilyStore.gcBefore. If you are wondering what exactly ColumnFamilyStore.gcBefore does and how to call it, the curated code examples below should help. You can also read further about the enclosing class, org.apache.cassandra.db.ColumnFamilyStore.
Six code examples of the ColumnFamilyStore.gcBefore method are shown below, ordered roughly by popularity.
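Most of the examples share the same overall shape: compute a nowInSec timestamp, pass cfs.gcBefore(nowInSec) to a CompactionController as the tombstone-purge threshold, and drive an SSTableRewriter from a CompactionIterator. The snippet below is a minimal sketch of that shared pattern, distilled from the examples that follow; it assumes a cfs and a set of sstables are already in hand (as in the test fixtures), and getWriter is the tests' own helper rather than a Cassandra API.

int nowInSec = FBUtilities.nowInSeconds();
try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
     LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
     SSTableRewriter writer = new SSTableRewriter(txn, 1000, false);
     // gcBefore(nowInSec) tells the controller which tombstones are old enough to be purged
     CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
     CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
{
    writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
    while (ci.hasNext())
        writer.append(ci.next());
    writer.finish();
}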
Example 1: basicTest
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void basicTest() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);

    // write 100 rows and flush them into a single sstable
    for (int j = 0; j < 100; j++)
    {
        new RowUpdateBuilder(cfs.metadata, j, String.valueOf(j))
            .clustering("0")
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .apply();
    }
    cfs.forceBlockingFlush();

    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    assertEquals(sstables.iterator().next().bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());

    // gcBefore(nowInSec) supplies the tombstone-purge threshold for the compaction controller
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list());
    assertEquals(1, filecounts);
    truncate(cfs);
}
Example 2: basicTest2
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void basicTest2() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false, 10000000, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list());
    assertEquals(1, filecounts);
}
Example 3: testTwoWriters
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
/**
 * emulates anticompaction - writing from one source sstable to two new sstables
 *
 * @throws IOException
 */
@Test
public void testTwoWriters() throws IOException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = Sets.newHashSet(s);
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false, false);
         SSTableRewriter writer2 = new SSTableRewriter(txn, 1000, false, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        writer2.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            if (writer.currentWriter().getFilePointer() < 15000000)
                writer.append(ci.next());
            else
                writer2.append(ci.next());
        }
        for (int i = 0; i < 5000; i++)
            assertFalse(Util.getOnlyPartition(Util.cmd(cfs, ByteBufferUtil.bytes(i)).build()).isEmpty());
    }
    truncateCF();
    validateCFS(cfs);
}
Example 4: getDefaultGcBefore
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
static int getDefaultGcBefore(ColumnFamilyStore cfs)
{
    // 2ndary indexes have ExpiringColumns too, so we need to purge tombstones deleted before now. We do not need to
    // add any GcGrace however since 2ndary indexes are local to a node.
    return cfs.isIndex() ? (int) (System.currentTimeMillis() / 1000) : cfs.gcBefore(System.currentTimeMillis());
}
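A note on the two branches above: in the Cassandra versions this helper comes from, cfs.gcBefore(now) essentially converts now to seconds and subtracts the table's gc_grace_seconds, so the only difference between the branches is whether gc_grace is applied. The following is a hypothetical illustration rather than code from the original page, and it assumes the 2.x-era CFMetaData.getGcGraceSeconds() accessor.

// Hypothetical sketch: what the two purge thresholds in getDefaultGcBefore work out to.
long now = System.currentTimeMillis();
int indexGcBefore = (int) (now / 1000);                                    // 2ndary index: purge anything deleted before "now"
int tableGcBefore = (int) (now / 1000) - cfs.metadata.getGcGraceSeconds(); // regular table: keep tombstones for gc_grace_seconds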
Example 5: replaceSSTable
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
private SSTableReader replaceSSTable(ColumnFamilyStore cfs, LifecycleTransaction txn, boolean fail)
{
    List<SSTableReader> newsstables = null;
    int nowInSec = FBUtilities.nowInSeconds();
    try (CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(FBUtilities.nowInSeconds())))
    {
        try (SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, false);
             AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
             CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId()))
        {
            long lastCheckObsoletion = System.nanoTime();
            File directory = txn.originals().iterator().next().descriptor.directory;
            Descriptor desc = Descriptor.fromFilename(cfs.getSSTablePath(directory));
            CFMetaData metadata = Schema.instance.getCFMetaData(desc);
            rewriter.switchWriter(SSTableWriter.create(metadata,
                                                       desc,
                                                       0,
                                                       0,
                                                       0,
                                                       SerializationHeader.make(cfs.metadata, txn.originals()),
                                                       txn));
            while (ci.hasNext())
            {
                rewriter.append(ci.next());
                if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L))
                {
                    controller.maybeRefreshOverlaps();
                    lastCheckObsoletion = System.nanoTime();
                }
            }
            if (!fail)
                newsstables = rewriter.finish();
            else
                rewriter.abort();
        }
    }
    assertTrue(fail || newsstables != null);
    if (newsstables != null)
    {
        Assert.assertEquals(1, newsstables.size());
        return newsstables.iterator().next();
    }
    return null;
}
Example 6: getPositionsTest
import org.apache.cassandra.db.ColumnFamilyStore; // import the package/class this method depends on
@Test
public void getPositionsTest() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    boolean checked = false;
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, false, 10000000, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            UnfilteredRowIterator row = ci.next();
            writer.append(row);
            if (!checked && writer.currentWriter().getFilePointer() > 1500000)
            {
                checked = true;
                for (SSTableReader sstable : cfs.getLiveSSTables())
                {
                    if (sstable.openReason == SSTableReader.OpenReason.EARLY)
                    {
                        SSTableReader c = txn.current(sstables.iterator().next());
                        Collection<Range<Token>> r = Arrays.asList(new Range<>(cfs.getPartitioner().getMinimumToken(), cfs.getPartitioner().getMinimumToken()));
                        List<Pair<Long, Long>> tmplinkPositions = sstable.getPositionsForRanges(r);
                        List<Pair<Long, Long>> compactingPositions = c.getPositionsForRanges(r);
                        assertEquals(1, tmplinkPositions.size());
                        assertEquals(1, compactingPositions.size());
                        assertEquals(0, tmplinkPositions.get(0).left.longValue());
                        // make sure we have no overlap between the early opened file and the compacting one:
                        assertEquals(tmplinkPositions.get(0).right.longValue(), compactingPositions.get(0).left.longValue());
                        assertEquals(c.uncompressedLength(), compactingPositions.get(0).right.longValue());
                    }
                }
            }
        }
        assertTrue(checked);
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list());
    assertEquals(1, filecounts);
    truncate(cfs);
}