This article collects typical usage examples of the Java method org.apache.cassandra.io.compress.CompressionMetadata.close. If you are wondering what CompressionMetadata.close does, or how to use it in practice, the curated examples below may help. You can also explore further examples of its enclosing class, org.apache.cassandra.io.compress.CompressionMetadata.
Two code examples of CompressionMetadata.close are shown below, sorted by popularity by default.
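As a quick orientation before the full examples, here is a minimal sketch of the lifecycle both examples rely on: open the metadata for a compressed SSTable data file, query it, and release it with close(). The file path and the wrapper class below are hypothetical placeholders for illustration; CompressionMetadata.create, chunkFor, and close are the same calls used in Example 1.

import org.apache.cassandra.io.compress.CompressionMetadata;

public class CompressionMetadataCloseSketch
{
    public static void main(String[] args)
    {
        // Hypothetical path to a compressed SSTable data file.
        String dataFile = "/var/lib/cassandra/data/ks/tbl/mc-1-big-Data.db";

        // create() opens the companion CompressionInfo component for this data file.
        CompressionMetadata metadata = CompressionMetadata.create(dataFile);
        try
        {
            // Look up the compressed chunk holding uncompressed offset 0.
            CompressionMetadata.Chunk chunk = metadata.chunkFor(0);
            System.out.println("chunk offset=" + chunk.offset + " length=" + chunk.length);
        }
        finally
        {
            // close() releases the resources (off-heap chunk offsets) held by the metadata.
            metadata.close();
        }
    }
}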
Example 1: overrideWithGarbage
import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this method depends on
private void overrideWithGarbage(SSTableReader sstable, ByteBuffer key1, ByteBuffer key2) throws IOException
{
    boolean compression = Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false"));
    long startPosition, endPosition;

    if (compression)
    { // overwrite with garbage the compression chunks from key1 to key2
        CompressionMetadata compData = CompressionMetadata.create(sstable.getFilename());

        CompressionMetadata.Chunk chunk1 = compData.chunkFor(
            sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.getPartitioner()), SSTableReader.Operator.EQ).position);
        CompressionMetadata.Chunk chunk2 = compData.chunkFor(
            sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.getPartitioner()), SSTableReader.Operator.EQ).position);

        startPosition = Math.min(chunk1.offset, chunk2.offset);
        endPosition = Math.max(chunk1.offset + chunk1.length, chunk2.offset + chunk2.length);

        compData.close();
    }
    else
    { // overwrite with garbage from key1 to key2
        long row0Start = sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.getPartitioner()), SSTableReader.Operator.EQ).position;
        long row1Start = sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.getPartitioner()), SSTableReader.Operator.EQ).position;
        startPosition = Math.min(row0Start, row1Start);
        endPosition = Math.max(row0Start, row1Start);
    }

    overrideWithGarbage(sstable, startPosition, endPosition);
}
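A caveat on this example: compData.close() sits on the straight-line path, so if one of the getPosition() or chunkFor() calls threw, the metadata would never be closed. Outside of test code, the close() belongs in a finally block, as in the orientation sketch above and in Example 2 below.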
Example 2: testMapForCompressionMetadata
import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this method depends on
@Test
public void testMapForCompressionMetadata() throws Exception
{
    int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
    MmappedRegions.MAX_SEGMENT_SIZE = 1024;

    ByteBuffer buffer = allocateBuffer(128 * 1024);
    File f = File.createTempFile("testMapForCompressionMetadata", "1");
    f.deleteOnExit();

    File cf = File.createTempFile(f.getName() + ".metadata", "1");
    cf.deleteOnExit();

    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (SequentialWriter writer = new CompressedSequentialWriter(f,
                                                                  cf.getAbsolutePath(),
                                                                  CompressionParams.snappy(),
                                                                  sstableMetadataCollector))
    {
        writer.write(buffer);
        writer.finish();
    }

    CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), ChecksumType.CRC32);
    try (ChannelProxy channel = new ChannelProxy(f);
         MmappedRegions regions = MmappedRegions.map(channel, metadata))
    {
        assertFalse(regions.isEmpty());
        int i = 0;
        while (i < buffer.capacity())
        {
            CompressionMetadata.Chunk chunk = metadata.chunkFor(i);

            MmappedRegions.Region region = regions.floor(chunk.offset);
            assertNotNull(region);

            ByteBuffer compressedChunk = region.buffer.duplicate();
            assertNotNull(compressedChunk);
            assertEquals(chunk.length + 4, compressedChunk.capacity());

            assertEquals(chunk.offset, region.bottom());
            assertEquals(chunk.offset + chunk.length + 4, region.top());

            i += metadata.chunkLength();
        }
    }
    finally
    {
        MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
        metadata.close();
    }
}
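Two details are worth noting in Example 2. First, the try-with-resources closes the channel and the mapped regions, but the CompressionMetadata itself still needs the explicit metadata.close() in the finally block, which also restores MAX_SEGMENT_SIZE for subsequent tests. Second, the "+ 4" in the capacity and top() assertions reflects the on-disk layout: Cassandra appends a 4-byte checksum after each compressed chunk, so a mapped region covering one chunk spans chunk.length plus 4 bytes.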