当前位置: 首页>>代码示例>>Java>>正文


Java MetadataCollector类代码示例

本文整理汇总了Java中org.apache.cassandra.io.sstable.metadata.MetadataCollector的典型用法代码示例。如果您正苦于以下问题:Java MetadataCollector类的具体用法?Java MetadataCollector怎么用?Java MetadataCollector使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


MetadataCollector类属于org.apache.cassandra.io.sstable.metadata包,在下文中一共展示了MetadataCollector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: CompressedSequentialWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses data chunk-by-chunk before
 * flushing it to {@code file}, recording per-chunk offsets in a separate
 * CompressionInfo component at {@code offsetsPath}.
 *
 * @param file                     the data file being written
 * @param offsetsPath              path for the -CompressionInfo.db component
 * @param parameters               compression algorithm and chunk length
 * @param sstableMetadataCollector collects sstable statistics during the write
 */
public CompressedSequentialWriter(File file,
                                  String offsetsPath,
                                  CompressionParameters parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength());

    this.sstableMetadataCollector = sstableMetadataCollector;
    this.compressor = parameters.sstableCompressor;

    // The compression scratch buffer must be able to hold one full chunk.
    compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);

    // Writer for the -CompressionInfo.db component (chunk offsets and its header).
    metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);

    // Checksums are accumulated over the raw output stream.
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(out);
}
 
开发者ID:vcostet,项目名称:cassandra-kmean,代码行数:18,代码来源:CompressedSequentialWriter.java

示例2: writeFile

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Writes an sstable containing {@code count * 5} partitions, each holding the
 * same {@code count} columns, and returns a reader over the new file.
 */
private SSTableReader writeFile(ColumnFamilyStore cfs, int count)
{
    ArrayBackedSortedColumns columns = ArrayBackedSortedColumns.factory.create(cfs.metadata);
    for (int col = 0; col < count; col++)
        columns.addColumn(Util.column(String.valueOf(col), "a", 1));

    File directory = cfs.directories.getDirectoryForNewSSTables();
    String path = cfs.getTempSSTablePath(directory);

    SSTableWriter writer = new SSTableWriter(path,
                                             0,
                                             0,
                                             cfs.metadata,
                                             StorageService.getPartitioner(),
                                             new MetadataCollector(cfs.metadata.comparator));

    // Append the same column family under count * 5 distinct keys.
    for (int key = 0; key < count * 5; key++)
        writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(key)), columns);

    return writer.closeAndOpenReader();
}
 
开发者ID:vcostet,项目名称:cassandra-kmean,代码行数:20,代码来源:AntiCompactionTest.java

示例3: writeFile

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Writes an sstable containing {@code count * 5} partitions, each holding
 * {@code count / 100} columns of random payload, and returns a reader over
 * the new file.
 */
private SSTableReader writeFile(ColumnFamilyStore cfs, int count)
{
    ArrayBackedSortedColumns columns = ArrayBackedSortedColumns.factory.create(cfs.metadata);
    for (int col = 0; col < count / 100; col++)
        columns.addColumn(Util.cellname(col), random(0, 1000), 1);

    File directory = cfs.directories.getDirectoryForNewSSTables();
    String path = cfs.getTempSSTablePath(directory);

    SSTableWriter writer = new SSTableWriter(path,
                                             0,
                                             0,
                                             cfs.metadata,
                                             StorageService.getPartitioner(),
                                             new MetadataCollector(cfs.metadata.comparator));

    // Append the same column family under count * 5 distinct keys.
    for (int key = 0; key < count * 5; key++)
        writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(key)), columns);

    return writer.closeAndOpenReader();
}
 
开发者ID:vcostet,项目名称:cassandra-kmean,代码行数:20,代码来源:SSTableRewriterTest.java

示例4: BigTableWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Writer for the "big" sstable format: opens the data file (compressed or
 * CRC-checksummed, depending on the table's compression params), prepares the
 * segmented-file builder used to read it back, and creates the partition
 * index writer.
 *
 * NOTE(review): {@code keyCount} and {@code repairedAt} are boxed Longs;
 * passing null would NPE on unboxing — confirm callers always pass non-null.
 *
 * @param descriptor        identifies the sstable's location and components
 * @param keyCount          estimated number of partitions to be written
 * @param repairedAt        repair timestamp recorded in sstable metadata
 * @param metadata          table schema
 * @param metadataCollector accumulates sstable-level statistics while writing
 * @param header            serialization header written alongside the data
 * @param txn               lifecycle transaction tracking the new files
 */
public BigTableWriter(Descriptor descriptor, 
                      Long keyCount, 
                      Long repairedAt, 
                      CFMetaData metadata, 
                      MetadataCollector metadataCollector, 
                      SerializationHeader header,
                      LifecycleTransaction txn)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    txn.trackNew(this); // must track before any files are created

    if (compression)
    {
        // Compressed path: the data writer also maintains the CompressionInfo component.
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.params.compression,
                                         metadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        // Uncompressed path: integrity is covered by a separate CRC component.
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode(), false);
    }
    iwriter = new IndexWriter(keyCount, dataFile);
}
 
开发者ID:scylladb,项目名称:scylla-tools-java,代码行数:27,代码来源:BigTableWriter.java

示例5: CompressedSequentialWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses data chunk-by-chunk before
 * flushing it to {@code file}, recording per-chunk offsets in a separate
 * CompressionInfo component at {@code offsetsPath}.
 *
 * @param file                     the data file being written
 * @param offsetsPath              path for the -CompressionInfo.db component
 * @param parameters               compression algorithm, chunk length and buffer type
 * @param sstableMetadataCollector collects sstable statistics during the write
 */
public CompressedSequentialWriter(File file,
                                  String offsetsPath,
                                  CompressionParams parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength(), parameters.getSstableCompressor().preferredBufferType());

    this.compressor = parameters.getSstableCompressor();
    this.sstableMetadataCollector = sstableMetadataCollector;

    // The compression scratch buffer must be able to hold one full uncompressed chunk.
    compressed = compressor.preferredBufferType().allocate(compressor.initialCompressedBufferLength(buffer.capacity()));

    // Writer for the -CompressionInfo.db component (chunk offsets and its header).
    metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);

    // Checksums are accumulated over a stream view of the underlying channel.
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(new DataOutputStream(Channels.newOutputStream(channel)));
}
 
开发者ID:scylladb,项目名称:scylla-tools-java,代码行数:18,代码来源:CompressedSequentialWriter.java

示例6: createCompactionWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates the writer that receives the upgraded copy of the sstable being
 * processed. The new sstable inherits the original's compaction level and
 * records the generations of the original plus any still-live ancestors.
 */
private SSTableWriter createCompactionWriter(long repairedAt)
{
    MetadataCollector collector = new MetadataCollector(cfs.getComparator());

    // toUpgrade always holds exactly one sstable here: carry over its
    // generation, the generations of ancestors whose data files still exist,
    // and its level.
    for (SSTableReader sstable : toUpgrade)
    {
        collector.addAncestor(sstable.descriptor.generation);
        for (Integer ancestor : sstable.getAncestors())
        {
            File ancestorData = new File(sstable.descriptor.withGeneration(ancestor).filenameFor(Component.DATA));
            if (ancestorData.exists())
                collector.addAncestor(ancestor);
        }
        collector.sstableLevel(sstable.getSSTableLevel());
    }

    return new SSTableWriter(cfs.getTempSSTablePath(directory), estimatedRows, repairedAt, cfs.metadata, cfs.partitioner, collector);
}
 
开发者ID:daidong,项目名称:GraphTrek,代码行数:21,代码来源:Upgrader.java

示例7: createCompactionWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates the writer that receives the upgraded copies of the sstables being
 * processed, recording each original's generation plus the generations of any
 * of its ancestors whose data files still exist.
 */
private SSTableWriter createCompactionWriter()
{
    MetadataCollector collector = new MetadataCollector(cfs.getComparator());

    // Record the generation of every precompacted sstable and of each of its
    // still-live ancestors.
    for (SSTableReader sstable : toUpgrade)
    {
        collector.addAncestor(sstable.descriptor.generation);
        for (Integer ancestor : sstable.getAncestors())
        {
            File ancestorData = new File(sstable.descriptor.withGeneration(ancestor).filenameFor(Component.DATA));
            if (ancestorData.exists())
                collector.addAncestor(ancestor);
        }
    }

    return new SSTableWriter(cfs.getTempSSTablePath(directory), estimatedRows, cfs.metadata, cfs.partitioner, collector);
}
 
开发者ID:mafernandez-stratio,项目名称:cassandra-cqlMod,代码行数:19,代码来源:Upgrader.java

示例8: CompressedSequentialWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses each chunk before it hits disk
 * and records per-chunk offsets in the CompressionInfo component at
 * {@code indexFilePath}.
 *
 * @param file                     the data file being written
 * @param indexFilePath            path for the -CompressionInfo.db component
 * @param skipIOCache              whether to advise the OS to bypass the page cache
 * @param parameters               compression algorithm and chunk length
 * @param sstableMetadataCollector collects sstable statistics during the write
 */
public CompressedSequentialWriter(File file,
                                  String indexFilePath,
                                  boolean skipIOCache,
                                  CompressionParameters parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength(), skipIOCache);
    this.compressor = parameters.sstableCompressor;

    // The compression scratch buffer must be able to hold one full chunk.
    compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);

    // -CompressionInfo.db component: its header is written up front.
    metadataWriter = CompressionMetadata.Writer.open(indexFilePath);
    metadataWriter.writeHeader(parameters);

    this.sstableMetadataCollector = sstableMetadataCollector;
}
 
开发者ID:mafernandez-stratio,项目名称:cassandra-cqlMod,代码行数:19,代码来源:CompressedSequentialWriter.java

示例9: CompressedSequentialWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses each chunk before it hits disk,
 * records per-chunk offsets in the CompressionInfo component at
 * {@code offsetsPath}, and checksums the written output.
 *
 * @param file                     the data file being written
 * @param offsetsPath              path for the -CompressionInfo.db component
 * @param skipIOCache              whether to advise the OS to bypass the page cache
 * @param parameters               compression algorithm and chunk length
 * @param sstableMetadataCollector collects sstable statistics during the write
 */
public CompressedSequentialWriter(File file,
                                  String offsetsPath,
                                  boolean skipIOCache,
                                  CompressionParameters parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength(), skipIOCache);
    this.compressor = parameters.sstableCompressor;

    // The compression scratch buffer must be able to hold one full chunk.
    compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);

    // -CompressionInfo.db component: its header is written up front.
    metadataWriter = CompressionMetadata.Writer.open(offsetsPath);
    metadataWriter.writeHeader(parameters);

    this.sstableMetadataCollector = sstableMetadataCollector;
    // Checksums are accumulated over the raw output stream.
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(out);
}
 
开发者ID:rajath26,项目名称:cassandra-trunk,代码行数:20,代码来源:CompressedSequentialWriter.java

示例10: create

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
@SuppressWarnings("resource") // SimpleSSTableMultiWriter closes writer
public static SSTableMultiWriter create(Descriptor descriptor,
                                        long keyCount,
                                        long repairedAt,
                                        CFMetaData cfm,
                                        MetadataCollector metadataCollector,
                                        SerializationHeader header,
                                        LifecycleTransaction txn)
{
    SSTableWriter writer = SSTableWriter.create(descriptor, keyCount, repairedAt, cfm, metadataCollector, header, txn);
    return new SimpleSSTableMultiWriter(writer, txn);
}
 
开发者ID:Netflix,项目名称:sstable-adaptor,代码行数:13,代码来源:SimpleSSTableMultiWriter.java

示例11: open

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Opens a {@code BigTableWriter} for the given descriptor.
 *
 * NOTE(review): the {@code txn} argument is accepted but not forwarded — this
 * fork's BigTableWriter constructor takes no transaction; confirm lifecycle
 * tracking of the new files is handled elsewhere.
 */
@Override
public SSTableWriter open(Descriptor descriptor,
                          long keyCount,
                          long repairedAt,
                          CFMetaData metadata,
                          MetadataCollector metadataCollector,
                          SerializationHeader header,
                          LifecycleTransaction txn)
{
    return new BigTableWriter(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
}
 
开发者ID:Netflix,项目名称:sstable-adaptor,代码行数:12,代码来源:BigFormat.java

示例12: BigTableWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Writer for the "big" sstable format (sstable-adaptor fork): opens the data
 * file (compressed or CRC-checksummed, depending on the table's compression
 * params), builds the {@code FileHandle} used to read it back, and wires up
 * the partition index and column index writers.
 *
 * NOTE(review): unlike upstream Cassandra, no LifecycleTransaction is taken
 * here — the trackNew/chunkCache calls below are deliberately commented out
 * in this fork; confirm file tracking is handled elsewhere before deleting
 * the dead lines.
 */
public BigTableWriter(Descriptor descriptor,
                      long keyCount,
                      long repairedAt,
                      CFMetaData metadata,
                      MetadataCollector metadataCollector,
                      SerializationHeader header)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    //txn.trackNew(this); // must track before any files are created

    if (compression)
    {
        // Compressed path: the data writer also maintains the CompressionInfo
        // and digest components.
        dataFile = new CompressedSequentialWriter(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         descriptor.filenameFor(descriptor.digestComponent),
                                         writerOption,
                                         metadata.params.compression,
                                         metadataCollector, descriptor.getConfiguration());
    }
    else
    {
        // Uncompressed path: integrity is covered by separate CRC and digest components.
        dataFile = new ChecksummedSequentialWriter(getFilename(),
                descriptor.filenameFor(Component.CRC),
                descriptor.filenameFor(descriptor.digestComponent),
                writerOption,
                descriptor.getConfiguration());
    }
    // Handle for reading the data component back; compression-aware.
    dbuilder = new FileHandle.Builder(descriptor.filenameFor(Component.DATA))
                             .withConfiguration(descriptor.getConfiguration())
                             .compressed(compression);
    //chunkCache.ifPresent(dbuilder::withChunkCache);
    iwriter = new IndexWriter(keyCount);

    // Column (row) index entries are serialized alongside the data file.
    columnIndexWriter = new ColumnIndex(this.header, dataFile, descriptor.version, this.observers,
                                        getRowIndexEntrySerializer().indexInfoSerializer());
}
 
开发者ID:Netflix,项目名称:sstable-adaptor,代码行数:37,代码来源:BigTableWriter.java

示例13: SSTableWriter

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Base initialization shared by all sstable writers: records the estimated
 * key count, repair time, metadata collector and serialization header.
 *
 * A null {@code header} indicates streaming from a pre-3.0 sstable, in which
 * case a stats-free header is synthesized for {@code this.header}.
 *
 * NOTE(review): the row-index serializer is built from the raw {@code header}
 * parameter (possibly null), not from the non-null {@code this.header} —
 * confirm getIndexSerializer tolerates a null header.
 */
protected SSTableWriter(Descriptor descriptor,
                        long keyCount,
                        long repairedAt,
                        CFMetaData metadata,
                        MetadataCollector metadataCollector,
                        SerializationHeader header)
{
    super(descriptor, components(metadata), metadata, DatabaseDescriptor.getDiskOptimizationStrategy());
    this.keyCount = keyCount;
    this.repairedAt = repairedAt;
    this.metadataCollector = metadataCollector;
    this.header = header != null ? header : SerializationHeader.makeWithoutStats(metadata); //null header indicates streaming from pre-3.0 sstable
    this.rowIndexEntrySerializer = descriptor.version.getSSTableFormat().getIndexSerializer(metadata, descriptor.version, header);
    this.observers = Collections.emptySet();
}
 
开发者ID:Netflix,项目名称:sstable-adaptor,代码行数:16,代码来源:SSTableWriter.java

示例14: create

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Opens a writer for the sstable identified by {@code descriptor}, delegating
 * to the writer factory of the descriptor's on-disk format.
 *
 * NOTE: keyCount/repairedAt remain boxed Longs to preserve the existing
 * caller-visible signature.
 */
public static SSTableWriter create(Descriptor descriptor,
                                   Long keyCount,
                                   Long repairedAt,
                                   CFMetaData metadata,
                                   MetadataCollector metadataCollector,
                                   SerializationHeader header,
                                   LifecycleTransaction txn)
{
    return descriptor.getFormat()
                     .getWriterFactory()
                     .open(descriptor, keyCount, repairedAt, metadata, metadataCollector, header, txn);
}
 
开发者ID:Netflix,项目名称:sstable-adaptor,代码行数:12,代码来源:SSTableWriter.java

示例15: open

import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Opens a format-specific sstable writer for the given descriptor.
 *
 * @param descriptor        identifies the sstable's location and components
 * @param keyCount          estimated number of partitions to be written
 * @param repairedAt        repair timestamp recorded in sstable metadata
 * @param metadata          table schema
 * @param metadataCollector accumulates sstable-level statistics while writing
 * @param header            serialization header written alongside the data
 * @param txn               lifecycle transaction tracking the new files
 */
public abstract SSTableWriter open(Descriptor descriptor,
                                   long keyCount,
                                   long repairedAt,
                                   CFMetaData metadata,
                                   MetadataCollector metadataCollector,
                                   SerializationHeader header,
                                   LifecycleTransaction txn);
 
开发者ID:Netflix,项目名称:sstable-adaptor,代码行数:8,代码来源:SSTableWriter.java


注:本文中的org.apache.cassandra.io.sstable.metadata.MetadataCollector类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。