当前位置: 首页>>代码示例>>Java>>正文


Java SegmentedFile.getBuilder方法代码示例

本文整理汇总了Java中org.apache.cassandra.io.util.SegmentedFile.getBuilder方法的典型用法代码示例。如果您正苦于以下问题:Java SegmentedFile.getBuilder方法的具体用法?Java SegmentedFile.getBuilder怎么用?Java SegmentedFile.getBuilder使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.io.util.SegmentedFile的用法示例。


在下文中一共展示了SegmentedFile.getBuilder方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: load

import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * @param recreateBloomFilter  rebuild the bloom filter while (re)building the index summary
 * @param saveSummaryIfCreated for bulk loading purposes, if the summary was absent and needed to be built, you can
 *                             avoid persisting it to disk by setting this to false
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
    SegmentedFile.Builder indexBuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dataBuilder;
    if (compression)
        dataBuilder = SegmentedFile.getCompressedBuilder();
    else
        dataBuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    // Rebuild the summary (and optionally the bloom filter) when explicitly asked to,
    // or when no persisted summary could be read back from disk.
    boolean summaryLoaded = loadSummary(indexBuilder, dataBuilder);
    boolean mustBuild = recreateBloomFilter || !summaryLoaded;
    if (mustBuild)
        buildSummary(recreateBloomFilter, indexBuilder, dataBuilder, summaryLoaded, Downsampling.BASE_SAMPLING_LEVEL);

    ifile = indexBuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dataBuilder.complete(descriptor.filenameFor(Component.DATA));
    if (saveSummaryIfCreated && mustBuild) // persist the freshly built summary for future loads
        saveSummary(indexBuilder, dataBuilder);
}
 
开发者ID:daidong,项目名称:GraphTrek,代码行数:22,代码来源:SSTableReader.java

示例2: SSTableWriter

import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Creates a writer for a new sstable, opening the data file (compressed or plain)
 * and the index-writing machinery.
 *
 * @param filename                 path the sstable components are derived from
 * @param keyCount                 expected number of keys, used to size the index writer
 * @param repairedAt               repair timestamp recorded for this sstable
 * @param metadata                 table schema, also decides the component set
 * @param partitioner              partitioner of the owning table
 * @param sstableMetadataCollector collector that accumulates sstable-level statistics
 */
public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename), components(metadata), metadata, partitioner);
    this.repairedAt = repairedAt;
    iwriter = new IndexWriter(keyCount);

    if (!compression)
    {
        // Plain data file: sequential writer plus a CRC component; segments follow the
        // configured disk access mode.
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    else
    {
        // Compressed data file: the writer also maintains the COMPRESSION_INFO component,
        // and the segment builder is tied to the writer itself.
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }

    this.sstableMetadataCollector = sstableMetadataCollector;
}
 
开发者ID:daidong,项目名称:GraphTrek,代码行数:31,代码来源:SSTableWriter.java

示例3: IndexWriter

import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Prepares the index-side artifacts for a new sstable: the primary index file writer,
 * its segment builder, the index summary builder and the bloom filter.
 *
 * @param keyCount expected number of keys, used to size the summary and bloom filter
 */
IndexWriter(long keyCount)
{
    File indexPath = new File(descriptor.filenameFor(Component.PRIMARY_INDEX));
    indexFile = SequentialWriter.open(indexPath);
    builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    // Summary sampling starts at the base level; the bloom filter is sized from the
    // expected key count and the table's configured false-positive chance.
    summary = new IndexSummaryBuilder(keyCount, metadata.getMinIndexInterval(), Downsampling.BASE_SAMPLING_LEVEL);
    bf = FilterFactory.getFilter(keyCount, metadata.getBloomFilterFpChance(), true);
}
 
开发者ID:daidong,项目名称:GraphTrek,代码行数:8,代码来源:SSTableWriter.java

示例4: SSTableWriter

import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Creates a writer for a new sstable in this legacy format, registering its fixed
 * component set and opening the data file and index writer.
 *
 * @param filename       path the sstable descriptor and components are derived from
 * @param keyCount       expected number of keys, used to size the index writer
 * @param metadata       table schema
 * @param partitioner    partitioner of the owning table
 * @param replayPosition commit-log replay position recorded for this sstable
 */
public SSTableWriter(String filename, long keyCount, CFMetaData metadata, IPartitioner partitioner, ReplayPosition replayPosition) throws IOException
{
    // This format always writes exactly these four components: data, bloom filter,
    // primary index and statistics.
    super(Descriptor.fromFilename(filename),
          new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS)),
          metadata,
          replayPosition,
          partitioner,
          SSTable.defaultRowHistogram(),
          SSTable.defaultColumnHistogram());
    iwriter = new IndexWriter(descriptor, partitioner, keyCount);
    // Data file segments follow the configured disk access mode (e.g. mmap vs standard I/O).
    dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    dataFile = new BufferedRandomAccessFile(new File(getFilename()), "rw", BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE, true);
}
 
开发者ID:devdattakulkarni,项目名称:Cassandra-KVPM,代码行数:14,代码来源:SSTableWriter.java

示例5: IndexWriter

import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Sets up index-side output for a new sstable: the raw index file, its segment
 * builder, the in-memory index summary and the row bloom filter.
 *
 * @param desc     descriptor naming the sstable's on-disk components
 * @param part     partitioner of the owning table
 * @param keyCount expected number of keys, used to size the summary and bloom filter
 */
IndexWriter(Descriptor desc, IPartitioner part, long keyCount) throws IOException
{
    this.desc = desc;
    this.partitioner = part;
    // Index file opened read-write with an 8 MB buffer.
    indexFile = new BufferedRandomAccessFile(new File(desc.filenameFor(SSTable.COMPONENT_INDEX)), "rw", 8 * 1024 * 1024, true);
    builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    summary = new IndexSummary(keyCount);
    // NOTE(review): 15 is the filter's sizing parameter (presumably buckets per element
    // in this era of Cassandra) — confirm against BloomFilter.getFilter.
    bf = BloomFilter.getFilter(keyCount, 15);
}
 
开发者ID:devdattakulkarni,项目名称:Cassandra-KVPM,代码行数:10,代码来源:SSTableWriter.java

示例6: load

import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * @param recreateBloomFilter  if true, rebuild the bloom filter while (re)building the summary
 * @param saveSummaryIfCreated for bulk loading purposes, if the summary was absent and needed to be built, you can
 *                             avoid persisting it to disk by setting this to false
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    // Compressed data files need the compression-aware builder.
    SegmentedFile.Builder dbuilder = compression
                                     ? SegmentedFile.getCompressedBuilder()
                                     : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    // Rebuild when explicitly asked, or when no persisted summary could be read back.
    boolean summaryLoaded = loadSummary(ibuilder, dbuilder);
    boolean builtSummary = false;
    if (recreateBloomFilter || !summaryLoaded)
    {
        buildSummary(recreateBloomFilter, ibuilder, dbuilder, summaryLoaded, Downsampling.BASE_SAMPLING_LEVEL);
        builtSummary = true;
    }

    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));

    // Check for an index summary that was downsampled even though the serialization format doesn't support
    // that.  If it was downsampled, rebuild it.  See CASSANDRA-8993 for details.
    if (!descriptor.version.hasSamplingLevel && !builtSummary && !validateSummarySamplingLevel())
    {
        // Release everything derived from the bad summary before redoing the work.
        indexSummary.close();
        ifile.close();
        dfile.close();

        logger.info("Detected erroneously downsampled index summary; will rebuild summary at full sampling");
        // Delete the persisted summary, then rebuild from scratch with fresh builders.
        FileUtils.deleteWithConfirm(new File(descriptor.filenameFor(Component.SUMMARY)));
        ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
        dbuilder = compression
                   ? SegmentedFile.getCompressedBuilder()
                   : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        buildSummary(false, ibuilder, dbuilder, false, Downsampling.BASE_SAMPLING_LEVEL);
        ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
        dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
        // The repaired summary is always persisted, regardless of saveSummaryIfCreated.
        saveSummary(ibuilder, dbuilder);
    }
    else if (saveSummaryIfCreated && builtSummary)
    {
        saveSummary(ibuilder, dbuilder);
    }
}
 
开发者ID:vcostet,项目名称:cassandra-kmean,代码行数:48,代码来源:SSTableReader.java

示例7: load

import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * Scans the primary index in a single pass and, along the way: (re)builds the bloom
 * filter if requested, samples entries into the index summary, warms the key cache
 * for the requested keys, and records candidate segment boundaries for both files.
 *
 * @param recreatebloom     rebuild the bloom filter from the index entries
 * @param keysToLoadInCache keys whose data positions should be pre-populated in the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
    boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    BufferedRandomAccessFile input = new BufferedRandomAccessFile(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)),
                                                                  "r",
                                                                  BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE,
                                                                  true);
    try
    {
        // Grow the key cache up-front so the requested keys will fit.
        if (keyCache != null && keyCache.getCapacity() - keyCache.size() < keysToLoadInCache.size())
            keyCache.updateCapacity(keyCache.size() + keysToLoadInCache.size());

        long indexSize = input.length();
        long estimatedKeys = SSTable.estimateRowsFromIndex(input);
        indexSummary = new IndexSummary(estimatedKeys);
        if (recreatebloom)
            // estimate key count based on index length
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        while (true)
        {
            long indexPosition = input.getFilePointer();
            if (indexPosition == indexSize)
                break;

            // Only materialize the key bytes when at least one consumer needs them;
            // otherwise skip past the length-prefixed key (which yields null below).
            boolean shouldAddEntry = indexSummary.shouldAddEntry();
            ByteBuffer key = (shouldAddEntry || cacheLoading || recreatebloom)
                         ? ByteBufferUtil.readWithShortLength(input)
                         : ByteBufferUtil.skipShortLength(input);
            long dataPosition = input.readLong();
            if (key != null)
            {
                DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
                if (recreatebloom)
                    bf.add(decoratedKey.key);
                if (shouldAddEntry)
                    indexSummary.addEntry(decoratedKey, indexPosition);
                if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
                    cacheKey(decoratedKey, dataPosition);
            }

            indexSummary.incrementRowid();
            // Every index/data offset is a potential segment boundary for the builders.
            ibuilder.addPotentialBoundary(indexPosition);
            dbuilder.addPotentialBoundary(dataPosition);
        }
        indexSummary.complete();
    }
    finally
    {
        FileUtils.closeQuietly(input);
    }

    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
 
开发者ID:devdattakulkarni,项目名称:Cassandra-KVPM,代码行数:63,代码来源:SSTableReader.java


注:本文中的org.apache.cassandra.io.util.SegmentedFile.getBuilder方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。