

Java SegmentedFile.Builder Method Code Examples

This article collects typical usage examples of the SegmentedFile.Builder method from the Java class org.apache.cassandra.io.util.SegmentedFile. If you are wondering what SegmentedFile.Builder does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.cassandra.io.util.SegmentedFile.


Nine code examples of the SegmentedFile.Builder method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
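Before the individual examples, it helps to see the lifecycle that all of them share: obtain a builder matching the configured access mode, feed it candidate segment boundaries while scanning the primary index, then call complete() to produce the finished SegmentedFile. The sketch below is reconstructed from the examples that follow; the descriptor and compression fields and the indexPosition/dataPosition variables are assumed to be supplied by the enclosing SSTableReader.

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on

// Step 1: one builder per file, matching the configured disk access mode;
// a compressed data file needs the dedicated compressed builder.
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
                                 ? SegmentedFile.getCompressedBuilder()
                                 : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

// Step 2: while scanning Index.db, record positions where a mmap segment may
// safely end, so that no single entry ever spans a segment boundary.
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);

// Step 3: complete() chooses the final segment layout and opens the file.
SegmentedFile ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
SegmentedFile dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));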

Example 1: load

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * @param saveSummaryIfCreated for bulk loading purposes, if the summary was absent and needed to be built, you can
 *                             avoid persisting it to disk by setting this to false
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression
                                     ? SegmentedFile.getCompressedBuilder()
                                     : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    boolean summaryLoaded = loadSummary(ibuilder, dbuilder);
    if (recreateBloomFilter || !summaryLoaded)
        buildSummary(recreateBloomFilter, ibuilder, dbuilder, summaryLoaded, Downsampling.BASE_SAMPLING_LEVEL);

    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
    if (saveSummaryIfCreated && (recreateBloomFilter || !summaryLoaded)) // save summary information to disk
        saveSummary(ibuilder, dbuilder);
}
 
Developer ID: daidong, Project: GraphTrek, Lines: 22, Source: SSTableReader.java

Example 2: loadSummary

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Load the index summary from the Summary.db file, if it exists.
 *
 * If the loaded index summary has a different index interval from the current value stored in the schema,
 * the Summary.db file is deleted and this method returns false so that the summary gets rebuilt.
 *
 * @param ibuilder segmented-file builder for the primary index file
 * @param dbuilder segmented-file builder for the data file
 * @return true if the index summary was loaded successfully from the Summary.db file.
 */
public boolean loadSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
    File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;

    DataInputStream iStream = null;
    try
    {
        iStream = new DataInputStream(new FileInputStream(summariesFile));
        indexSummary = IndexSummary.serializer.deserialize(
                iStream, partitioner, descriptor.version.hasSamplingLevel,
                metadata.getMinIndexInterval(), metadata.getMaxIndexInterval());
        first = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
        last = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
        ibuilder.deserializeBounds(iStream);
        dbuilder.deserializeBounds(iStream);
    }
    catch (IOException e)
    {
        if (indexSummary != null)
            indexSummary.close();
        logger.debug("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
        // corrupted; close the stream, delete the file, and fall back to creating a new summary
        FileUtils.closeQuietly(iStream);
        FileUtils.deleteWithConfirm(summariesFile);
        return false;
    }
    finally
    {
        FileUtils.closeQuietly(iStream);
    }

    return true;
}
 
Developer ID: vcostet, Project: cassandra-kmean, Lines: 47, Source: SSTableReader.java

Example 3: loadSummary

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Load the index summary from the Summary.db file, if it exists.
 *
 * If the loaded index summary has a different index interval from the current value stored in the schema,
 * the Summary.db file is deleted and this method returns false so that the summary gets rebuilt.
 *
 * @param ibuilder segmented-file builder for the primary index file
 * @param dbuilder segmented-file builder for the data file
 * @return true if the index summary was loaded successfully from the Summary.db file.
 */
public boolean loadSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
    File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;

    DataInputStream iStream = null;
    try
    {
        iStream = new DataInputStream(new FileInputStream(summariesFile));
        indexSummary = IndexSummary.serializer.deserialize(iStream, partitioner, descriptor.version.hasSamplingLevel, metadata.getMinIndexInterval(), metadata.getMaxIndexInterval());
        first = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
        last = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
        ibuilder.deserializeBounds(iStream);
        dbuilder.deserializeBounds(iStream);
    }
    catch (IOException e)
    {
        logger.debug("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
        // corrupted; close the stream, delete the file, and fall back to creating a new summary
        FileUtils.closeQuietly(iStream);
        FileUtils.deleteWithConfirm(summariesFile);
        return false;
    }
    finally
    {
        FileUtils.closeQuietly(iStream);
    }

    return true;
}
 
Developer ID: daidong, Project: GraphTrek, Lines: 43, Source: SSTableReader.java

Example 4: load

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * @param saveSummaryIfCreated for bulk loading purposes, if the summary was absent and needed to be built, you can
 *                             avoid persisting it to disk by setting this to false
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = compression
                                     ? SegmentedFile.getCompressedBuilder()
                                     : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    boolean summaryLoaded = loadSummary(ibuilder, dbuilder);
    boolean builtSummary = false;
    if (recreateBloomFilter || !summaryLoaded)
    {
        buildSummary(recreateBloomFilter, ibuilder, dbuilder, summaryLoaded, Downsampling.BASE_SAMPLING_LEVEL);
        builtSummary = true;
    }

    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));

    // Check for an index summary that was downsampled even though the serialization format doesn't support
    // that.  If it was downsampled, rebuild it.  See CASSANDRA-8993 for details.
    if (!descriptor.version.hasSamplingLevel && !builtSummary && !validateSummarySamplingLevel())
    {
        indexSummary.close();
        ifile.close();
        dfile.close();

        logger.info("Detected erroneously downsampled index summary; will rebuild summary at full sampling");
        FileUtils.deleteWithConfirm(new File(descriptor.filenameFor(Component.SUMMARY)));
        ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
        dbuilder = compression
                   ? SegmentedFile.getCompressedBuilder()
                   : SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
        buildSummary(false, ibuilder, dbuilder, false, Downsampling.BASE_SAMPLING_LEVEL);
        ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
        dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
        saveSummary(ibuilder, dbuilder);
    }
    else if (saveSummaryIfCreated && builtSummary)
    {
        saveSummary(ibuilder, dbuilder);
    }
}
 
Developer ID: vcostet, Project: cassandra-kmean, Lines: 48, Source: SSTableReader.java

Example 5: buildSummary

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Build the index summary (and optionally the bloom filter) by reading through the Index.db file.
 *
 * @param recreateBloomFilter true to rebuild the bloom filter
 * @param ibuilder segmented-file builder for the primary index file
 * @param dbuilder segmented-file builder for the data file
 * @param summaryLoaded true if the index summary is already loaded and does not need to be built again
 * @param samplingLevel sampling level to use when building a new index summary
 * @throws IOException
 */
private void buildSummary(boolean recreateBloomFilter, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder, boolean summaryLoaded, int samplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));

    try
    {
        long indexSize = primaryIndex.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
                             ? histogramCount
                             : estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional

        try(IndexSummaryBuilder summaryBuilder = summaryLoaded ? null : new IndexSummaryBuilder(estimatedKeys, metadata.getMinIndexInterval(), samplingLevel))
        {

            if (recreateBloomFilter)
                bf = FilterFactory.getFilter(estimatedKeys, metadata.getBloomFilterFpChance(), true);

            long indexPosition;
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
                RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(primaryIndex, descriptor.version);
                DecoratedKey decoratedKey = partitioner.decorateKey(key);
                if (first == null)
                    first = decoratedKey;
                last = decoratedKey;

                if (recreateBloomFilter)
                    bf.add(decoratedKey.getKey());

                // if summary was already read from disk we don't want to re-populate it using primary index
                if (!summaryLoaded)
                {
                    summaryBuilder.maybeAddEntry(decoratedKey, indexPosition);
                    ibuilder.addPotentialBoundary(indexPosition);
                    dbuilder.addPotentialBoundary(indexEntry.position);
                }
            }

            if (!summaryLoaded)
                indexSummary = summaryBuilder.build(partitioner);
        }
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }

    first = getMinimalKey(first);
    last = getMinimalKey(last);
}
 
Developer ID: vcostet, Project: cassandra-kmean, Lines: 63, Source: SSTableReader.java

Example 6: buildSummary

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Build the index summary (and optionally the bloom filter) by reading through the Index.db file.
 *
 * @param recreateBloomFilter true to rebuild the bloom filter
 * @param ibuilder segmented-file builder for the primary index file
 * @param dbuilder segmented-file builder for the data file
 * @param summaryLoaded true if the index summary is already loaded and does not need to be built again
 * @param samplingLevel sampling level to use when building a new index summary
 * @throws IOException
 */
private void buildSummary(boolean recreateBloomFilter, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder, boolean summaryLoaded, int samplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));

    try
    {
        long indexSize = primaryIndex.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
                           ? histogramCount
                           : estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional

        if (recreateBloomFilter)
            bf = FilterFactory.getFilter(estimatedKeys, metadata.getBloomFilterFpChance(), true);

        IndexSummaryBuilder summaryBuilder = null;
        if (!summaryLoaded)
            summaryBuilder = new IndexSummaryBuilder(estimatedKeys, metadata.getMinIndexInterval(), samplingLevel);

        long indexPosition;
        while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
        {
            ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
            RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(primaryIndex, descriptor.version);
            DecoratedKey decoratedKey = partitioner.decorateKey(key);
            if (first == null)
                first = decoratedKey;
            last = decoratedKey;

            if (recreateBloomFilter)
                bf.add(decoratedKey.getKey());

            // if summary was already read from disk we don't want to re-populate it using primary index
            if (!summaryLoaded)
            {
                summaryBuilder.maybeAddEntry(decoratedKey, indexPosition);
                ibuilder.addPotentialBoundary(indexPosition);
                dbuilder.addPotentialBoundary(indexEntry.position);
            }
        }

        if (!summaryLoaded)
            indexSummary = summaryBuilder.build(partitioner);
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }

    first = getMinimalKey(first);
    last = getMinimalKey(last);
}
 
Developer ID: daidong, Project: GraphTrek, Lines: 63, Source: SSTableReader.java

Example 7: buildSummary

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Build the index summary (and optionally the bloom filter) by reading through the Index.db file.
 *
 * @param recreateBloomFilter true to rebuild the bloom filter
 * @param ibuilder segmented-file builder for the primary index file
 * @param dbuilder segmented-file builder for the data file
 * @param summaryLoaded true if the index summary is already loaded and does not need to be built again
 * @param samplingLevel sampling level to use when building a new index summary
 * @throws IOException
 */
private void buildSummary(boolean recreateBloomFilter, SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder, boolean summaryLoaded, int samplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));

    try
    {
        long indexSize = primaryIndex.length();
        long histogramCount = sstableMetadata.estimatedRowSize.count();
        long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedRowSize.isOverflowed()
                           ? histogramCount
                           : estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional

        if (recreateBloomFilter)
            bf = FilterFactory.getFilter(estimatedKeys, metadata.getBloomFilterFpChance(), true);

        IndexSummaryBuilder summaryBuilder = null;
        if (!summaryLoaded)
            summaryBuilder = new IndexSummaryBuilder(estimatedKeys, metadata.getMinIndexInterval(), samplingLevel);

        long indexPosition;
        while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
        {
            ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
            RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(primaryIndex, descriptor.version);
            DecoratedKey decoratedKey = partitioner.decorateKey(key);
            if (first == null)
                first = decoratedKey;
            last = decoratedKey;

            if (recreateBloomFilter)
                bf.add(decoratedKey.key);

            // if summary was already read from disk we don't want to re-populate it using primary index
            if (!summaryLoaded)
            {
                summaryBuilder.maybeAddEntry(decoratedKey, indexPosition);
                ibuilder.addPotentialBoundary(indexPosition);
                dbuilder.addPotentialBoundary(indexEntry.position);
            }
        }

        if (!summaryLoaded)
            indexSummary = summaryBuilder.build(partitioner);
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }

    first = getMinimalKey(first);
    last = getMinimalKey(last);
}
 
Developer ID: rajath26, Project: cassandra-trunk, Lines: 63, Source: SSTableReader.java

Example 8: load

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
    boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    BufferedRandomAccessFile input = new BufferedRandomAccessFile(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)),
                                                                  "r",
                                                                  BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE,
                                                                  true);
    try
    {
        if (keyCache != null && keyCache.getCapacity() - keyCache.size() < keysToLoadInCache.size())
            keyCache.updateCapacity(keyCache.size() + keysToLoadInCache.size());

        long indexSize = input.length();
        long estimatedKeys = SSTable.estimateRowsFromIndex(input);
        indexSummary = new IndexSummary(estimatedKeys);
        if (recreatebloom)
            // estimate key count based on index length
            bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
        while (true)
        {
            long indexPosition = input.getFilePointer();
            if (indexPosition == indexSize)
                break;

            boolean shouldAddEntry = indexSummary.shouldAddEntry();
            ByteBuffer key = (shouldAddEntry || cacheLoading || recreatebloom)
                         ? ByteBufferUtil.readWithShortLength(input)
                         : ByteBufferUtil.skipShortLength(input);
            long dataPosition = input.readLong();
            if (key != null)
            {
                DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
                if (recreatebloom)
                    bf.add(decoratedKey.key);
                if (shouldAddEntry)
                    indexSummary.addEntry(decoratedKey, indexPosition);
                if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
                    cacheKey(decoratedKey, dataPosition);
            }

            indexSummary.incrementRowid();
            ibuilder.addPotentialBoundary(indexPosition);
            dbuilder.addPotentialBoundary(dataPosition);
        }
        indexSummary.complete();
    }
    finally
    {
        FileUtils.closeQuietly(input);
    }

    // finalize the state of the reader
    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}
 
Developer ID: devdattakulkarni, Project: Cassandra-KVPM, Lines: 63, Source: SSTableReader.java

Example 9: saveSummary

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on
/**
 * Save the index summary to the Summary.db file.
 *
 * @param ibuilder segmented-file builder for the primary index file
 * @param dbuilder segmented-file builder for the data file
 */
public void saveSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
    saveSummary(ibuilder, dbuilder, indexSummary);
}
 
Developer ID: vcostet, Project: cassandra-kmean, Lines: 11, Source: SSTableReader.java
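The overload invoked above writes the same fields that loadSummary (Examples 2 and 3) reads back, in the same order: the serialized summary, the first and last keys, and each builder's bounds. The sketch below reconstructs that write path from the read path as an illustration only; the plain DataOutputStream wrapper and the exact serialize(...) signature are assumptions, since stream and serializer types differ across Cassandra versions.

import org.apache.cassandra.io.util.SegmentedFile; // import the package/class this method depends on

// A sketch of the write path mirroring loadSummary's read order; the stream
// wrapper and serializer signature are assumptions, not the exact upstream code.
private void saveSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder, IndexSummary summary)
{
    File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
    if (summariesFile.exists())
        FileUtils.deleteWithConfirm(summariesFile);

    DataOutputStream oStream = null;
    try
    {
        oStream = new DataOutputStream(new FileOutputStream(summariesFile));
        // Write in the order loadSummary reads: summary, first key, last key, then each builder's bounds.
        IndexSummary.serializer.serialize(summary, oStream, descriptor.version.hasSamplingLevel);
        ByteBufferUtil.writeWithLength(first.getKey(), oStream);
        ByteBufferUtil.writeWithLength(last.getKey(), oStream);
        ibuilder.serializeBounds(oStream);
        dbuilder.serializeBounds(oStream);
    }
    catch (IOException e)
    {
        logger.debug("Cannot save SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
        // a half-written summary is harmless: delete it and let the next load rebuild it
        if (summariesFile.exists())
            FileUtils.deleteWithConfirm(summariesFile);
    }
    finally
    {
        FileUtils.closeQuietly(oStream);
    }
}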


Note: The org.apache.cassandra.io.util.SegmentedFile.Builder method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.