This page collects typical usage examples of the Java class org.apache.cassandra.db.compaction.LazilyCompactedRow. If you are unsure what LazilyCompactedRow does, how to use it, or want to see it in working code, the curated examples below should help.
The LazilyCompactedRow class belongs to the org.apache.cassandra.db.compaction package. 13 code examples are shown below, sorted by popularity by default.
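All 13 examples share the same core loop, so before the individual tests, here is a minimal sketch of that pattern. It is written against the pre-3.0 Cassandra API the examples below use (in that era SSTableRewriter, ISSTableScanner, and SSTableReader live under org.apache.cassandra.io.sstable); getWriter(...) is a helper assumed to be provided by the surrounding test class, as in the examples, and is not shown here.
import java.util.Arrays;
import java.util.Set;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.AbstractCompactedRow;
import org.apache.cassandra.db.compaction.CompactionController;
import org.apache.cassandra.db.compaction.LazilyCompactedRow;
import org.apache.cassandra.io.sstable.ISSTableScanner;
import org.apache.cassandra.io.sstable.SSTableReader;
import org.apache.cassandra.io.sstable.SSTableRewriter;
// Minimal sketch: stream every partition of one SSTable through a
// LazilyCompactedRow (columns are merged and purged lazily, without
// materialising the whole partition in memory) and append it to an
// SSTableRewriter, which can open partially written files early.
void rewriteOneSSTable(ColumnFamilyStore cfs, Set<SSTableReader> sstables) throws Exception
{
    SSTableReader sstable = sstables.iterator().next();
    SSTableRewriter rewriter = new SSTableRewriter(cfs, sstables, 1000, false);
    // getWriter(...) is a test helper assumed from the surrounding class (not shown)
    rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory));
    try (ISSTableScanner scanner = sstable.getScanner();
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis())))
    {
        while (scanner.hasNext())
        {
            AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
            rewriter.append(row);
        }
        rewriter.finish(); // on any failure, call rewriter.abort() instead
    }
}
After finish(), the tests typically hand the returned readers to cfs.getDataTracker().markCompactedSSTablesReplaced(...) so the tracker swaps the old SSTables for the new ones; the variations below exercise the abort, metrics, and early-open edge cases of this flow.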
Example 1: testNumberOfFiles_abort
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
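// Appends each row through LazilyCompactedRow, switches to a new writer every ~25 MB,
// checks that the SSTable count tracks the early-opened files, then aborts the whole rewrite.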
@Test
public void testNumberOfFiles_abort() throws Exception
{
testNumberOfFiles_abort(new RewriterTest()
{
public void run(ISSTableScanner scanner, CompactionController controller, SSTableReader sstable, ColumnFamilyStore cfs, SSTableRewriter rewriter)
{
int files = 1;
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory));
files++;
assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
}
}
rewriter.abort();
}
});
}
Example 2: testNumberOfFiles_abort3
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
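// Abort variant that switches writers exactly once, as soon as ~10 MB has been written, before aborting.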
@Test
public void testNumberOfFiles_abort3() throws Exception
{
testNumberOfFiles_abort(new RewriterTest()
{
public void run(ISSTableScanner scanner, CompactionController controller, SSTableReader sstable, ColumnFamilyStore cfs, SSTableRewriter rewriter)
{
int files = 1;
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (files == 1 && rewriter.currentWriter().getFilePointer() > 10000000)
{
rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory));
files++;
assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
}
}
rewriter.abort();
}
});
}
Example 3: basicTest
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
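// End-to-end happy path: flush 100 small rows into one SSTable, stream every partition
// through LazilyCompactedRow into the rewriter, then swap the rewritten SSTable in for the original.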
@Test
public void basicTest() throws InterruptedException
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
for (int j = 0; j < 100; j++)
{
ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
Mutation rm = new Mutation(KEYSPACE, key);
rm.add(CF, Util.cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
rm.apply();
}
cfs.forceBlockingFlush();
Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
assertEquals(1, sstables.size());
SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
{
ISSTableScanner scanner = scanners.scanners.get(0);
CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
while(scanner.hasNext())
{
AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
writer.append(row);
}
}
Collection<SSTableReader> newsstables = writer.finish();
cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
Thread.sleep(100);
validateCFS(cfs);
int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
assertEquals(1, filecounts);
}
Example 4: basicTest2
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
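// Same pattern as basicTest, but against a larger file produced by writeFile()
// and with a 10 MB early-open interval.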
@Test
public void basicTest2() throws InterruptedException
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
SSTableReader s = writeFile(cfs, 1000);
cfs.addSSTable(s);
Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
assertEquals(1, sstables.size());
SSTableRewriter.overrideOpenInterval(10000000);
SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
{
ISSTableScanner scanner = scanners.scanners.get(0);
CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
while (scanner.hasNext())
{
AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
writer.append(row);
}
}
Collection<SSTableReader> newsstables = writer.finish();
cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
Thread.sleep(100);
validateCFS(cfs);
int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
assertEquals(1, filecounts);
}
Example 5: testNumberOfFiles_abort2
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
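// Abort variant that switches writers every ~25 MB and aborts mid-loop once three
// writers exist, i.e. while the newest file is still empty.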
@Test
public void testNumberOfFiles_abort2() throws Exception
{
testNumberOfFiles_abort(new RewriterTest()
{
public void run(ISSTableScanner scanner, CompactionController controller, SSTableReader sstable, ColumnFamilyStore cfs, SSTableRewriter rewriter)
{
int files = 1;
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory));
files++;
assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
}
if (files == 3)
{
// abort when nothing has been written to the new file yet
rewriter.abort();
break;
}
}
}
});
}
Example 6: getPositionsTest
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
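// While rewriting, compares getPositionsForRanges() of the early-opened (EARLY) reader
// and the compacting reader to prove their covered ranges abut without overlapping.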
@Test
public void getPositionsTest() throws InterruptedException
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
SSTableReader s = writeFile(cfs, 1000);
cfs.addSSTable(s);
Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
assertEquals(1, sstables.size());
SSTableRewriter.overrideOpenInterval(10000000);
SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
boolean checked = false;
try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
{
ISSTableScanner scanner = scanners.scanners.get(0);
CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
while (scanner.hasNext())
{
AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
writer.append(row);
if (!checked && writer.currentWriter().getFilePointer() > 15000000)
{
checked = true;
for (SSTableReader sstable : cfs.getSSTables())
{
if (sstable.openReason == SSTableReader.OpenReason.EARLY)
{
SSTableReader c = sstables.iterator().next();
Collection<Range<Token>> r = Arrays.asList(new Range<>(cfs.partitioner.getMinimumToken(), cfs.partitioner.getMinimumToken()));
List<Pair<Long, Long>> tmplinkPositions = sstable.getPositionsForRanges(r);
List<Pair<Long, Long>> compactingPositions = c.getPositionsForRanges(r);
assertEquals(1, tmplinkPositions.size());
assertEquals(1, compactingPositions.size());
assertEquals(0, tmplinkPositions.get(0).left.longValue());
// make sure we have no overlap between the early opened file and the compacting one:
assertEquals(tmplinkPositions.get(0).right.longValue(), compactingPositions.get(0).left.longValue());
assertEquals(c.uncompressedLength(), compactingPositions.get(0).right.longValue());
}
}
}
}
}
assertTrue(checked);
Collection<SSTableReader> newsstables = writer.finish();
cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
Thread.sleep(100);
validateCFS(cfs);
int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
assertEquals(1, filecounts);
cfs.truncateBlocking();
Thread.sleep(1000); // make sure the deletion tasks have run etc
validateCFS(cfs);
}
Example 7: testNumberOfFilesAndSizes
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
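// Checks disk-space accounting: liveDiskSpaceUsed/totalDiskSpaceUsed stay pinned to the
// original file while rewriting, and match the new files (plus StorageMetrics.load) after finish().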
@Test
public void testNumberOfFilesAndSizes() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
SSTableReader s = writeFile(cfs, 1000);
cfs.addSSTable(s);
long startStorageMetricsLoad = StorageMetrics.load.count();
Set<SSTableReader> compacting = Sets.newHashSet(s);
SSTableRewriter.overrideOpenInterval(10000000);
SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
int files = 1;
try (ISSTableScanner scanner = s.getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0))
{
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
files++;
assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
assertEquals(s.bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.count());
assertEquals(s.bytesOnDisk(), cfs.metric.totalDiskSpaceUsed.count());
}
}
}
List<SSTableReader> sstables = rewriter.finish();
cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
long sum = 0;
for (SSTableReader x : cfs.getSSTables())
sum += x.bytesOnDisk();
assertEquals(sum, cfs.metric.liveDiskSpaceUsed.count());
assertEquals(startStorageMetricsLoad - s.bytesOnDisk() + sum, StorageMetrics.load.count());
assertEquals(files, sstables.size());
assertEquals(files, cfs.getSSTables().size());
Thread.sleep(1000);
// tmplink and tmp files should be gone:
assertEquals(sum, cfs.metric.totalDiskSpaceUsed.count());
assertFileCounts(s.descriptor.directory.list(), 0, 0);
validateCFS(cfs);
}
Example 8: testNumberOfFiles_dont_clean_readers
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
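// After finish() the replaced reader is still listed in the tracker's shadowed set;
// only markCompactedSSTablesReplaced() clears it.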
@Test
public void testNumberOfFiles_dont_clean_readers() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
SSTableReader s = writeFile(cfs, 1000);
cfs.addSSTable(s);
Set<SSTableReader> compacting = Sets.newHashSet(s);
SSTableRewriter.overrideOpenInterval(10000000);
SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
int files = 1;
try (ISSTableScanner scanner = s.getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0))
{
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
files++;
assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
}
}
List<SSTableReader> sstables = rewriter.finish();
assertEquals(files, sstables.size());
assertEquals(files, cfs.getSSTables().size());
assertEquals(1, cfs.getDataTracker().getView().shadowed.size());
cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
assertEquals(files, cfs.getSSTables().size());
assertEquals(0, cfs.getDataTracker().getView().shadowed.size());
Thread.sleep(1000);
assertFileCounts(s.descriptor.directory.list(), 0, 0);
validateCFS(cfs);
}
catch (Throwable t)
{
rewriter.abort();
throw t;
}
}
Example 9: testNumberOfFiles_finish_empty_new_writer
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
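// Calls finish() while the most recently switched writer has received no data;
// the empty last file is expected to be dropped, leaving files - 1 SSTables.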
@Test
public void testNumberOfFiles_finish_empty_new_writer() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
SSTableReader s = writeFile(cfs, 1000);
cfs.addSSTable(s);
Set<SSTableReader> compacting = Sets.newHashSet(s);
SSTableRewriter.overrideOpenInterval(10000000);
SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
int files = 1;
try (ISSTableScanner scanner = s.getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0))
{
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
files++;
assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
}
if (files == 3)
{
// finish when nothing has been written to the new file yet
List<SSTableReader> sstables = rewriter.finish();
cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
break;
}
}
Thread.sleep(1000);
assertEquals(files - 1, cfs.getSSTables().size()); // we never wrote anything to the last file
assertFileCounts(s.descriptor.directory.list(), 0, 0);
validateCFS(cfs);
}
catch (Throwable t)
{
rewriter.abort();
throw t;
}
}
Example 10: testNumberOfFiles_truncate
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
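// Runs a full rewrite, then truncates the CFS and waits for the deletion tasks
// to leave the directory clean.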
@Test
public void testNumberOfFiles_truncate() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
cfs.disableAutoCompaction();
SSTableReader s = writeFile(cfs, 1000);
cfs.addSSTable(s);
Set<SSTableReader> compacting = Sets.newHashSet(s);
SSTableRewriter.overrideOpenInterval(10000000);
SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
int files = 1;
try (ISSTableScanner scanner = s.getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0))
{
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
files++;
assertEquals(cfs.getSSTables().size(), files); // we have one original file plus the ones we have switched out.
}
}
List<SSTableReader> sstables = rewriter.finish();
cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
Thread.sleep(1000);
assertFileCounts(s.descriptor.directory.list(), 0, 0);
cfs.truncateBlocking();
Thread.sleep(1000); // make sure the deletion tasks have run etc
validateCFS(cfs);
}
catch (Throwable t)
{
rewriter.abort();
throw t;
}
}
Example 11: testSmallFiles
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
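// Uses a tiny 1 MB early-open interval so every switched file has already been
// opened early; asserts the SSTable count before each switch.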
@Test
public void testSmallFiles() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
cfs.disableAutoCompaction();
SSTableReader s = writeFile(cfs, 400);
cfs.addSSTable(s);
Set<SSTableReader> compacting = Sets.newHashSet(s);
SSTableRewriter.overrideOpenInterval(1000000);
SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
int files = 1;
try (ISSTableScanner scanner = s.getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0))
{
while(scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getOnDiskFilePointer() > 2500000)
{
assertEquals(files, cfs.getSSTables().size()); // all files are now opened early
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
files++;
}
}
List<SSTableReader> sstables = rewriter.finish();
cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, sstables, OperationType.COMPACTION);
assertEquals(files, sstables.size());
assertEquals(files, cfs.getSSTables().size());
Thread.sleep(1000);
assertFileCounts(s.descriptor.directory.list(), 0, 0);
validateCFS(cfs);
}
catch (Throwable t)
{
rewriter.abort();
throw t;
}
}
Example 12: testAbortHelper
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
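// Shared helper for the abort tests: optionally throws early from finishAndThrow(), then
// aborts; in offline mode the files are untracked by the CFS and must be deleted by hand.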
private void testAbortHelper(boolean earlyException, boolean offline) throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
SSTableReader s = writeFile(cfs, 1000);
if (!offline)
cfs.addSSTable(s);
Set<SSTableReader> compacting = Sets.newHashSet(s);
cfs.getDataTracker().markCompacting(compacting);
SSTableRewriter.overrideOpenInterval(10000000);
SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, offline);
SSTableWriter w = getWriter(cfs, s.descriptor.directory);
rewriter.switchWriter(w);
try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0))
{
while (scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
}
}
try
{
rewriter.finishAndThrow(earlyException);
}
catch (Throwable t)
{
rewriter.abort();
}
}
finally
{
cfs.getDataTracker().unmarkCompacting(compacting);
}
Thread.sleep(1000);
int filecount = assertFileCounts(s.descriptor.directory.list(), 0, 0);
assertEquals(filecount, 1);
if (!offline)
{
assertEquals(1, cfs.getSSTables().size());
validateCFS(cfs);
}
cfs.truncateBlocking();
Thread.sleep(1000);
filecount = assertFileCounts(s.descriptor.directory.list(), 0, 0);
if (offline)
{
// the file is not added to the CFS, therefore it is not truncated away above
assertEquals(1, filecount);
for (File f : s.descriptor.directory.listFiles())
{
f.delete();
}
filecount = assertFileCounts(s.descriptor.directory.list(), 0, 0);
}
assertEquals(0, filecount);
}
Example 13: testAllKeysReadable
import org.apache.cassandra.db.compaction.LazilyCompactedRow; // import the required package/class
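// Stress-checks visibility during a rewrite: switches writers every 10 keys and
// re-runs validateKeys() after every single append.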
@Test
public void testAllKeysReadable() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.truncateBlocking();
for (int i = 0; i < 100; i++)
{
DecoratedKey key = Util.dk(Integer.toString(i));
Mutation rm = new Mutation(KEYSPACE, key.getKey());
for (int j = 0; j < 10; j++)
rm.add(CF, Util.cellname(Integer.toString(j)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 100);
rm.apply();
}
cfs.forceBlockingFlush();
cfs.forceMajorCompaction();
validateKeys(keyspace);
assertEquals(1, cfs.getSSTables().size());
SSTableReader s = cfs.getSSTables().iterator().next();
Set<SSTableReader> compacting = new HashSet<>();
compacting.add(s);
cfs.getDataTracker().markCompacting(compacting);
SSTableRewriter rewriter = new SSTableRewriter(cfs, compacting, 1000, false);
SSTableRewriter.overrideOpenInterval(1);
SSTableWriter w = getWriter(cfs, s.descriptor.directory);
rewriter.switchWriter(w);
int keyCount = 0;
try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0))
{
while (scanner.hasNext())
{
rewriter.append(new LazilyCompactedRow(controller, Arrays.asList(scanner.next())));
if (keyCount % 10 == 0)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory));
}
keyCount++;
validateKeys(keyspace);
}
try
{
cfs.getDataTracker().markCompactedSSTablesReplaced(compacting, rewriter.finish(), OperationType.COMPACTION);
cfs.getDataTracker().unmarkCompacting(compacting);
}
catch (Throwable t)
{
rewriter.abort();
}
}
validateKeys(keyspace);
Thread.sleep(1000);
validateCFS(cfs);
}