This page collects typical usage examples of the Java class org.apache.cassandra.io.compress.CompressedSequentialWriter. If you are unsure what CompressedSequentialWriter is for or how to use it, the selected examples below should help.
The CompressedSequentialWriter class belongs to the org.apache.cassandra.io.compress package. Fifteen code examples are shown below, ordered by popularity. Note that they come from different Cassandra code bases and versions, so constructor and factory signatures vary between examples.
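Across versions the basic pattern is the same: create (or open) a CompressedSequentialWriter pointing at a data file and a companion offsets (CompressionInfo) file, write buffers to it, and call finish(). A minimal self-contained sketch, assuming the 3.x-era constructor used in Examples 6 and 15 below (the file paths and the enclosing class are illustrative, not taken from the examples):

import java.io.File;
import java.nio.ByteBuffer;

import org.apache.cassandra.db.ClusteringComparator;
import org.apache.cassandra.db.marshal.BytesType;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
import org.apache.cassandra.io.util.SequentialWriter;
import org.apache.cassandra.schema.CompressionParams;

public class CompressedWriterSketch // illustrative class, not from the examples
{
    public static void main(String[] args) throws Exception
    {
        File data = new File("/tmp/sketch-Data.db");               // illustrative path
        File offsets = new File("/tmp/sketch-CompressionInfo.db"); // illustrative path
        MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));

        // Write some data through the compressing writer; finish() flushes the
        // chunk-offset metadata to the offsets file (same pattern as Example 15).
        try (SequentialWriter writer = new CompressedSequentialWriter(data,
                                                                      offsets.getAbsolutePath(),
                                                                      CompressionParams.snappy(),
                                                                      collector))
        {
            writer.write(ByteBuffer.allocate(64 * 1024));
            writer.finish();
        }
    }
}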
Example 1: BigTableWriter
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public BigTableWriter(Descriptor descriptor,
                      Long keyCount,
                      Long repairedAt,
                      CFMetaData metadata,
                      MetadataCollector metadataCollector,
                      SerializationHeader header,
                      LifecycleTransaction txn)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    txn.trackNew(this); // must track before any files are created

    if (compression)
    {
        // Compressed path: the SequentialWriter factory returns a CompressedSequentialWriter,
        // which also maintains the CompressionInfo (chunk offsets) component.
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.params.compression,
                                         metadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        // Uncompressed path: plain sequential writer plus a CRC component for integrity checks.
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode(), false);
    }
    iwriter = new IndexWriter(keyCount, dataFile);
}
Example 2: BigTableWriter
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public BigTableWriter(Descriptor descriptor,
                      long keyCount,
                      long repairedAt,
                      CFMetaData metadata,
                      MetadataCollector metadataCollector,
                      SerializationHeader header)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    //txn.trackNew(this); // must track before any files are created

    if (compression)
    {
        // Compressed path: construct the writer directly, including a digest component.
        dataFile = new CompressedSequentialWriter(getFilename(),
                                                  descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                  descriptor.filenameFor(descriptor.digestComponent),
                                                  writerOption,
                                                  metadata.params.compression,
                                                  metadataCollector,
                                                  descriptor.getConfiguration());
    }
    else
    {
        // Uncompressed path: a checksummed writer covers the CRC and digest components.
        dataFile = new ChecksummedSequentialWriter(getFilename(),
                                                   descriptor.filenameFor(Component.CRC),
                                                   descriptor.filenameFor(descriptor.digestComponent),
                                                   writerOption,
                                                   descriptor.getConfiguration());
    }
    dbuilder = new FileHandle.Builder(descriptor.filenameFor(Component.DATA))
               .withConfiguration(descriptor.getConfiguration())
               .compressed(compression);
    //chunkCache.ifPresent(dbuilder::withChunkCache);
    iwriter = new IndexWriter(keyCount);
    columnIndexWriter = new ColumnIndex(this.header, dataFile, descriptor.version, this.observers,
                                        getRowIndexEntrySerializer().indexInfoSerializer());
}
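In this newer variant the finished data file is later accessed through a FileHandle rather than a SegmentedFile. A hedged sketch of how a builder like dbuilder above is typically consumed (FileHandle.Builder.complete() is assumed from 3.11-era Cassandra and is not shown in this example):

// Sketch only: assumes FileHandle.Builder.complete() from Cassandra 3.11-era code.
try (FileHandle dfile = dbuilder.complete())
{
    // dfile can now hand out readers over the (possibly compressed) data file.
}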
Example 3: SSTableWriter
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    this.repairedAt = repairedAt;

    if (compression)
    {
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    iwriter = new IndexWriter(keyCount, dataFile);
    this.sstableMetadataCollector = sstableMetadataCollector;
}
Example 4: open
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public static CompressedSequentialWriter open(String dataFilePath,
                                              String offsetsPath,
                                              CompressionParameters parameters,
                                              MetadataCollector sstableMetadataCollector)
{
    return new CompressedSequentialWriter(new File(dataFilePath), offsetsPath, parameters, sstableMetadataCollector);
}
Example 5: SSTableWriter
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public SSTableWriter(String filename,
                     long keyCount,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     SSTableMetadata.Collector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dbuilder = SegmentedFile.getCompressedBuilder();
        dataFile = CompressedSequentialWriter.open(getFilename(),
                                                   descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                   !metadata.populateIoCacheOnFlush(),
                                                   metadata.compressionParameters(),
                                                   sstableMetadataCollector);
    }
    else
    {
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = SequentialWriter.open(new File(getFilename()), !metadata.populateIoCacheOnFlush());
        dataFile.setDataIntegrityWriter(DataIntegrityMetadata.checksumWriter(descriptor));
    }
    this.sstableMetadataCollector = sstableMetadataCollector;
}
Example 6: open
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public static CompressedSequentialWriter open(String dataFilePath,
                                              String offsetsPath,
                                              CompressionParams parameters,
                                              MetadataCollector sstableMetadataCollector)
{
    return new CompressedSequentialWriter(new File(dataFilePath), offsetsPath, parameters, sstableMetadataCollector);
}
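A minimal sketch of calling this factory, reusing the CompressionParams and MetadataCollector construction from Example 15 below (the paths are illustrative):

MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
try (CompressedSequentialWriter writer =
         CompressedSequentialWriter.open("/tmp/sketch-Data.db",            // illustrative path
                                         "/tmp/sketch-CompressionInfo.db", // illustrative path
                                         CompressionParams.snappy(),
                                         collector))
{
    writer.write(ByteBuffer.wrap(new byte[]{ 1, 2, 3 }));
    writer.finish();
}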
Example 7: SSTableWriter
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    this.repairedAt = repairedAt;
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    this.sstableMetadataCollector = sstableMetadataCollector;
}
Example 8: SSTableWriter
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public SSTableWriter(String filename,
                     long keyCount,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     SSTableMetadata.Collector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dbuilder = SegmentedFile.getCompressedBuilder();
        dataFile = CompressedSequentialWriter.open(getFilename(),
                                                   descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                   !metadata.populateIoCacheOnFlush(),
                                                   metadata.compressionParameters(),
                                                   sstableMetadataCollector);
    }
    else
    {
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = SequentialWriter.open(new File(getFilename()),
                                         !metadata.populateIoCacheOnFlush());
        dataFile.setComputeDigest();
    }
    this.sstableMetadataCollector = sstableMetadataCollector;
}
Example 9: SSTableWriter
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public SSTableWriter(String filename,
                     long keyCount,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dbuilder = SegmentedFile.getCompressedBuilder();
        dataFile = CompressedSequentialWriter.open(getFilename(),
                                                   descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                   !metadata.populateIoCacheOnFlush(),
                                                   metadata.compressionParameters(),
                                                   sstableMetadataCollector);
    }
    else
    {
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = SequentialWriter.open(new File(getFilename()), !metadata.populateIoCacheOnFlush());
        dataFile.setDataIntegrityWriter(DataIntegrityMetadata.checksumWriter(descriptor));
    }
    this.sstableMetadataCollector = sstableMetadataCollector;
}
Example 10: open
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public static CompressedSequentialWriter open(String dataFilePath,
                                              String offsetsPath,
                                              boolean skipIOCache,
                                              CompressionParameters parameters,
                                              MetadataCollector sstableMetadataCollector)
{
    return new CompressedSequentialWriter(new File(dataFilePath), offsetsPath, skipIOCache, parameters, sstableMetadataCollector);
}
Example 11: Builder
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public Builder(CompressedSequentialWriter writer)
{
    super(writer);
}
Example 12: getCompressedBuilder
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public static Builder getCompressedBuilder(CompressedSequentialWriter writer)
{
    return new CompressedPoolingSegmentedFile.Builder(writer);
}
Example 13: Builder
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public Builder(CompressedSequentialWriter writer)
{
    this.writer = writer;
}
Example 14: getCompressedBuilder
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

public static Builder getCompressedBuilder(CompressedSequentialWriter writer)
{
    return new CompressedSegmentedFile.Builder(writer);
}
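Once the writer has been finished, a builder obtained this way is typically asked to materialize a SegmentedFile over the data file. A hedged sketch, assuming the 2.x-era Builder.complete(String) and SequentialWriter.getPath() methods (neither is shown in these examples):

// Sketch only: complete(String) and getPath() are assumptions from 2.x-era Cassandra.
SegmentedFile.Builder dbuilder = SegmentedFile.getCompressedBuilder(writer);
SegmentedFile dfile = dbuilder.complete(writer.getPath()); // segmented view of the data file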
Example 15: testMapForCompressionMetadata
import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class

@Test
public void testMapForCompressionMetadata() throws Exception
{
    int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
    MmappedRegions.MAX_SEGMENT_SIZE = 1024; // shrink segments so the test maps several regions

    ByteBuffer buffer = allocateBuffer(128 * 1024);
    File f = File.createTempFile("testMapForCompressionMetadata", "1");
    f.deleteOnExit();

    File cf = File.createTempFile(f.getName() + ".metadata", "1");
    cf.deleteOnExit();

    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (SequentialWriter writer = new CompressedSequentialWriter(f,
                                                                  cf.getAbsolutePath(),
                                                                  CompressionParams.snappy(),
                                                                  sstableMetadataCollector))
    {
        writer.write(buffer);
        writer.finish();
    }

    CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), ChecksumType.CRC32);
    try (ChannelProxy channel = new ChannelProxy(f);
         MmappedRegions regions = MmappedRegions.map(channel, metadata))
    {
        assertFalse(regions.isEmpty());
        int i = 0;
        while (i < buffer.capacity())
        {
            CompressionMetadata.Chunk chunk = metadata.chunkFor(i);
            MmappedRegions.Region region = regions.floor(chunk.offset);
            assertNotNull(region);

            ByteBuffer compressedChunk = region.buffer.duplicate();
            assertNotNull(compressedChunk);
            assertEquals(chunk.length + 4, compressedChunk.capacity()); // chunk payload plus 4-byte checksum
            assertEquals(chunk.offset, region.bottom());
            assertEquals(chunk.offset + chunk.length + 4, region.top());

            i += metadata.chunkLength();
        }
    }
    finally
    {
        MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
        metadata.close();
    }
}