

Java CompressedSequentialWriter Class Code Examples

This article collects typical usage examples of the Java class org.apache.cassandra.io.compress.CompressedSequentialWriter. If you are wondering what CompressedSequentialWriter is for, how to use it, or what it looks like in real code, the curated examples below should help.


The CompressedSequentialWriter class belongs to the org.apache.cassandra.io.compress package. The sections below show 15 code examples that use it, drawn from several open-source projects and ordered by popularity.
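
Before the collected examples, here is a minimal usage sketch of the class, modelled on the constructor signature that appears in Examples 6 and 15 below (a Cassandra 3.x-era API). It is illustrative only: the import locations of the helper classes (MetadataCollector, CompressionParams, and so on) are assumptions and may differ between Cassandra versions.

import java.io.File;
import java.nio.ByteBuffer;

import org.apache.cassandra.db.ClusteringComparator;
import org.apache.cassandra.db.marshal.BytesType;
import org.apache.cassandra.io.compress.CompressedSequentialWriter;
import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
import org.apache.cassandra.io.util.SequentialWriter;
import org.apache.cassandra.schema.CompressionParams;

public class CompressedSequentialWriterSketch
{
    public static void main(String[] args) throws Exception
    {
        // The data file and the companion file that stores the compressed-chunk offsets.
        File dataFile = File.createTempFile("sketch", ".db");
        File offsetsFile = File.createTempFile("sketch", ".compressioninfo");
        dataFile.deleteOnExit();
        offsetsFile.deleteOnExit();

        // Collector for sstable-level metadata, constructed as in Example 15.
        MetadataCollector collector =
            new MetadataCollector(new ClusteringComparator(BytesType.instance));

        // Write a buffer through the compressing writer and finalise the file;
        // finish() flushes the data and writes the chunk-offset metadata.
        try (SequentialWriter writer = new CompressedSequentialWriter(dataFile,
                                                                      offsetsFile.getAbsolutePath(),
                                                                      CompressionParams.snappy(),
                                                                      collector))
        {
            writer.write(ByteBuffer.wrap(new byte[64 * 1024]));
            writer.finish();
        }
    }
}

The compression metadata written next to the data file can later be opened and memory-mapped chunk by chunk, as Example 15 demonstrates.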

Example 1: BigTableWriter

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public BigTableWriter(Descriptor descriptor,
                      long keyCount,
                      long repairedAt,
                      CFMetaData metadata, 
                      MetadataCollector metadataCollector, 
                      SerializationHeader header,
                      LifecycleTransaction txn)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    txn.trackNew(this); // must track before any files are created

    if (compression)
    {
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.params.compression,
                                         metadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode(), false);
    }
    iwriter = new IndexWriter(keyCount, dataFile);
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 27, Source file: BigTableWriter.java

Example 2: BigTableWriter

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public BigTableWriter(Descriptor descriptor,
                      long keyCount,
                      long repairedAt,
                      CFMetaData metadata,
                      MetadataCollector metadataCollector,
                      SerializationHeader header)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    //txn.trackNew(this); // must track before any files are created

    if (compression)
    {
        dataFile = new CompressedSequentialWriter(getFilename(),
                                                  descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                  descriptor.filenameFor(descriptor.digestComponent),
                                                  writerOption,
                                                  metadata.params.compression,
                                                  metadataCollector,
                                                  descriptor.getConfiguration());
    }
    else
    {
        dataFile = new ChecksummedSequentialWriter(getFilename(),
                                                   descriptor.filenameFor(Component.CRC),
                                                   descriptor.filenameFor(descriptor.digestComponent),
                                                   writerOption,
                                                   descriptor.getConfiguration());
    }
    dbuilder = new FileHandle.Builder(descriptor.filenameFor(Component.DATA))
                             .withConfiguration(descriptor.getConfiguration())
                             .compressed(compression);
    //chunkCache.ifPresent(dbuilder::withChunkCache);
    iwriter = new IndexWriter(keyCount);

    columnIndexWriter = new ColumnIndex(this.header, dataFile, descriptor.version, this.observers,
                                        getRowIndexEntrySerializer().indexInfoSerializer());
}
 
Author: Netflix, Project: sstable-adaptor, Lines: 37, Source file: BigTableWriter.java

Example 3: SSTableWriter

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    this.repairedAt = repairedAt;

    if (compression)
    {
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    iwriter = new IndexWriter(keyCount, dataFile);

    this.sstableMetadataCollector = sstableMetadataCollector;
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 31, Source file: SSTableWriter.java

Example 4: open

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public static CompressedSequentialWriter open(String dataFilePath,
                                              String offsetsPath,
                                              CompressionParameters parameters,
                                              MetadataCollector sstableMetadataCollector)
{
    return new CompressedSequentialWriter(new File(dataFilePath), offsetsPath, parameters, sstableMetadataCollector);
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 8, Source file: SequentialWriter.java

Example 5: SSTableWriter

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public SSTableWriter(String filename,
                     long keyCount,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     SSTableMetadata.Collector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dbuilder = SegmentedFile.getCompressedBuilder();
        dataFile = CompressedSequentialWriter.open(getFilename(),
                                                   descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                   !metadata.populateIoCacheOnFlush(),
                                                   metadata.compressionParameters(),
                                                   sstableMetadataCollector);
    }
    else
    {
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = SequentialWriter.open(new File(getFilename()), !metadata.populateIoCacheOnFlush());
        dataFile.setDataIntegrityWriter(DataIntegrityMetadata.checksumWriter(descriptor));
    }

    this.sstableMetadataCollector = sstableMetadataCollector;
}
 
Author: pgaref, Project: ACaZoo, Lines: 31, Source file: SSTableWriter.java

Example 6: open

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public static CompressedSequentialWriter open(String dataFilePath,
                                              String offsetsPath,
                                              CompressionParams parameters,
                                              MetadataCollector sstableMetadataCollector)
{
    return new CompressedSequentialWriter(new File(dataFilePath), offsetsPath, parameters, sstableMetadataCollector);
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 8, Source file: SequentialWriter.java

Example 7: SSTableWriter

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    this.repairedAt = repairedAt;
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }

    this.sstableMetadataCollector = sstableMetadataCollector;
}
 
Author: daidong, Project: GraphTrek, Lines: 31, Source file: SSTableWriter.java

Example 8: SSTableWriter

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public SSTableWriter(String filename,
                     long keyCount,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     SSTableMetadata.Collector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dbuilder = SegmentedFile.getCompressedBuilder();
        dataFile = CompressedSequentialWriter.open(getFilename(),
                                                   descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                   !metadata.populateIoCacheOnFlush(),
                                                   metadata.compressionParameters(),
                                                   sstableMetadataCollector);
    }
    else
    {
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = SequentialWriter.open(new File(getFilename()),
                                         !metadata.populateIoCacheOnFlush());
        dataFile.setComputeDigest();
    }

    this.sstableMetadataCollector = sstableMetadataCollector;
}
 
Author: dprguiuc, Project: Cassandra-Wasef, Lines: 32, Source file: SSTableWriter.java

Example 9: SSTableWriter

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public SSTableWriter(String filename,
                     long keyCount,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    iwriter = new IndexWriter(keyCount);

    if (compression)
    {
        dbuilder = SegmentedFile.getCompressedBuilder();
        dataFile = CompressedSequentialWriter.open(getFilename(),
                                                   descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                   !metadata.populateIoCacheOnFlush(),
                                                   metadata.compressionParameters(),
                                                   sstableMetadataCollector);
    }
    else
    {
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        dataFile = SequentialWriter.open(new File(getFilename()), !metadata.populateIoCacheOnFlush());
        dataFile.setDataIntegrityWriter(DataIntegrityMetadata.checksumWriter(descriptor));
    }

    this.sstableMetadataCollector = sstableMetadataCollector;
}
 
Author: mafernandez-stratio, Project: cassandra-cqlMod, Lines: 31, Source file: SSTableWriter.java

Example 10: open

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public static CompressedSequentialWriter open(String dataFilePath,
                                              String offsetsPath,
                                              boolean skipIOCache,
                                              CompressionParameters parameters,
                                              MetadataCollector sstableMetadataCollector)
{
    return new CompressedSequentialWriter(new File(dataFilePath), offsetsPath, skipIOCache, parameters, sstableMetadataCollector);
}
 
Author: rajath26, Project: cassandra-trunk, Lines: 9, Source file: SequentialWriter.java

Example 11: Builder

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public Builder(CompressedSequentialWriter writer)
{
    super(writer);
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 5, Source file: CompressedPoolingSegmentedFile.java

Example 12: getCompressedBuilder

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public static Builder getCompressedBuilder(CompressedSequentialWriter writer)
{
    return new CompressedPoolingSegmentedFile.Builder(writer);
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 5, Source file: SegmentedFile.java

Example 13: Builder

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public Builder(CompressedSequentialWriter writer)
{
    this.writer = writer;
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 5, Source file: CompressedSegmentedFile.java

Example 14: getCompressedBuilder

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
public static Builder getCompressedBuilder(CompressedSequentialWriter writer)
{
    return new CompressedSegmentedFile.Builder(writer);
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 5, Source file: SegmentedFile.java

Example 15: testMapForCompressionMetadata

import org.apache.cassandra.io.compress.CompressedSequentialWriter; // import the required package/class
@Test
public void testMapForCompressionMetadata() throws Exception
{
    int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
    MmappedRegions.MAX_SEGMENT_SIZE = 1024;

    ByteBuffer buffer = allocateBuffer(128 * 1024);
    File f = File.createTempFile("testMapForCompressionMetadata", "1");
    f.deleteOnExit();

    File cf = File.createTempFile(f.getName() + ".metadata", "1");
    cf.deleteOnExit();

    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try(SequentialWriter writer = new CompressedSequentialWriter(f,
                                                                 cf.getAbsolutePath(),
                                                                 CompressionParams.snappy(),
                                                                 sstableMetadataCollector))
    {
        writer.write(buffer);
        writer.finish();
    }

    CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), ChecksumType.CRC32);
    try(ChannelProxy channel = new ChannelProxy(f);
        MmappedRegions regions = MmappedRegions.map(channel, metadata))
    {

        assertFalse(regions.isEmpty());
        int i = 0;
        while(i < buffer.capacity())
        {
            CompressionMetadata.Chunk chunk = metadata.chunkFor(i);

            MmappedRegions.Region region = regions.floor(chunk.offset);
            assertNotNull(region);

            ByteBuffer compressedChunk = region.buffer.duplicate();
            assertNotNull(compressedChunk);
            assertEquals(chunk.length + 4, compressedChunk.capacity());

            assertEquals(chunk.offset, region.bottom());
            assertEquals(chunk.offset + chunk.length + 4, region.top());

            i += metadata.chunkLength();
        }
    }
    finally
    {
        MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
        metadata.close();
    }
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 54, Source file: MmappedRegionsTest.java


Note: the org.apache.cassandra.io.compress.CompressedSequentialWriter examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's license before distributing or reusing it. Do not republish this compilation without permission.