

Java RandomAccessReader.open Method Code Examples

This article collects typical usage examples of the Java method org.apache.cassandra.io.util.RandomAccessReader.open. If you are wondering what RandomAccessReader.open does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.cassandra.io.util.RandomAccessReader.


The following presents 15 code examples of the RandomAccessReader.open method, sorted by popularity by default.
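
Before the individual examples, here is a minimal sketch of the pattern they all share: open a RandomAccessReader over an on-disk file, read from it through its DataInput-style API, and let try-with-resources (or FileUtils.closeQuietly) release the reader. The sketch assumes a Cassandra version whose RandomAccessReader.open overload accepts a java.io.File, as in most examples below; the readPayload helper and the 4-byte length-prefix layout are hypothetical and for illustration only.

import java.io.File;
import java.io.IOException;

import org.apache.cassandra.io.util.RandomAccessReader;

public class RandomAccessReaderOpenSketch
{
    // Hypothetical helper: reads a 4-byte length prefix followed by that many payload bytes.
    public static byte[] readPayload(File dataFile) throws IOException
    {
        // RandomAccessReader is AutoCloseable, so try-with-resources releases the underlying
        // buffers/channel even if a read fails -- the same pattern used in Examples 1, 3 and 6.
        try (RandomAccessReader reader = RandomAccessReader.open(dataFile))
        {
            long fileLength = reader.length();   // total size of the file in bytes
            int payloadSize = reader.readInt();  // assumed length prefix written by the producer
            if (payloadSize < 0 || payloadSize > fileLength - Integer.BYTES)
                throw new IOException("corrupt length prefix: " + payloadSize);

            byte[] payload = new byte[payloadSize];
            reader.readFully(payload);           // DataInput-style bulk read
            return payload;
        }
    }
}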

Example 1: deserialize

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components;
    logger.trace("Load metadata for {}", descriptor);
    String statsFile = descriptor.filenameFor(Component.STATS);

    if (!HadoopFileUtils.exists(statsFile, descriptor.getConfiguration()))
    {
        logger.trace("No sstable stats for {}", descriptor);
        components = Maps.newHashMap();
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile, descriptor.getConfiguration()))
        {
            components = deserialize(descriptor, r, types);
        }
    }

    return components;
}
 
Developer: Netflix, Project: sstable-adaptor, Lines of code: 23, Source: MetadataSerializer.java

Example 2: buildSummaryAtLevel

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long indexSize = primaryIndex.length();
        try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel))
        {
            long indexPosition;
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
                RowIndexEntry.Serializer.skip(primaryIndex);
            }

            return summaryBuilder.build(partitioner);
        }
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines of code: 25, Source: SSTableReader.java

Example 3: deserialize

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components;
    logger.debug("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        logger.debug("No sstable stats for {}", descriptor);
        components = Maps.newHashMap();
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile))
        {
            components = deserialize(descriptor, r, types);
        }
    }
    return components;
}
 
Developer: vcostet, Project: cassandra-kmean, Lines of code: 21, Source: MetadataSerializer.java

Example 4: testSerializedSize

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
@Test
public void testSerializedSize() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(tokens).finish();

    final File treeFile = File.createTempFile("token-tree-size-test", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();


    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    Assert.assertEquals((int) reader.bytesRemaining(), builder.serializedSize());
}
 
Developer: xedin, Project: sasi, Lines of code: 17, Source: TokenTreeTest.java

Example 5: skipPastEnd

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
@Test
public void skipPastEnd() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();

    final File treeFile = File.createTempFile("token-tree-skip-past-test", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();

    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    final RangeIterator<Long, Token> tokenTree = new TokenTree(new MappedBuffer(reader)).iterator(KEY_CONVERTER);

    tokenTree.skipTo(simpleTokenMap.lastKey() + 10);
}
 
Developer: xedin, Project: sasi, Lines of code: 18, Source: TokenTreeTest.java

Example 6: deserialize

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components;
    logger.trace("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        logger.trace("No sstable stats for {}", descriptor);
        components = Maps.newHashMap();
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile))
        {
            components = deserialize(descriptor, r, types);
        }
    }
    return components;
}
 
Developer: scylladb, Project: scylla-tools-java, Lines of code: 21, Source: MetadataSerializer.java

Example 7: testSerialization

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
@Test
public void testSerialization() throws IOException
{
    Map<MetadataType, MetadataComponent> originalMetadata = constructMetadata();

    MetadataSerializer serializer = new MetadataSerializer();
    File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);

    Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0);
    try (RandomAccessReader in = RandomAccessReader.open(statsFile))
    {
        Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));

        for (MetadataType type : MetadataType.values())
        {
            assertEquals(originalMetadata.get(type), deserialized.get(type));
        }
    }
}
 
Developer: scylladb, Project: scylla-tools-java, Lines of code: 20, Source: MetadataSerializerTest.java

Example 8: buildSummaryAtLevel

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long indexSize = primaryIndex.length();
        IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel);

        long indexPosition;
        while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
        {
            summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
            RowIndexEntry.Serializer.skip(primaryIndex);
        }

        return summaryBuilder.build(partitioner);
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }
}
 
Developer: daidong, Project: GraphTrek, Lines of code: 24, Source: SSTableReader.java

Example 9: Scrubber

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;
    this.skipCorrupted = skipCorrupted;
    this.isOffline = isOffline;

    List<SSTableReader> toScrub = Collections.singletonList(sstable);

    // Calculate the expected compacted filesize
    this.destination = cfs.directories.getWriteableLocationAsFile(cfs.getExpectedCompactedFileSize(toScrub, OperationType.SCRUB));
    if (destination == null)
        throw new IOException("disk full");

    // If we run scrub offline, we should never purge tombstone, as we cannot know if other sstable have data that the tombstone deletes.
    this.controller = isOffline
                    ? new ScrubController(cfs)
                    : new CompactionController(cfs, Collections.singleton(sstable), CompactionManager.getDefaultGcBefore(cfs));
    this.isCommutative = cfs.metadata.isCounter();
    this.expectedBloomFilterSize = Math.max(cfs.metadata.getMinIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(toScrub)));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    this.dataFile = isOffline
                    ? sstable.openDataReader()
                    : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.scrubInfo = new ScrubInfo(dataFile, sstable);
}
 
Developer: vcostet, Project: cassandra-kmean, Lines of code: 33, Source: Scrubber.java

Example 10: Scrubber

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;

    // Calculate the expected compacted filesize
    this.destination = cfs.directories.getDirectoryForNewSSTables();
    if (destination == null)
        throw new IOException("disk full");

    List<SSTableReader> toScrub = Collections.singletonList(sstable);
    // If we run scrub offline, we should never purge tombstone, as we cannot know if other sstable have data that the tombstone deletes.
    this.controller = isOffline
                    ? new ScrubController(cfs)
                    : new CompactionController(cfs, Collections.singleton(sstable), CompactionManager.getDefaultGcBefore(cfs));
    this.isCommutative = cfs.metadata.getDefaultValidator().isCommutative();
    this.expectedBloomFilterSize = Math.max(cfs.metadata.getIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(toScrub, cfs.metadata)));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    this.dataFile = isOffline
                    ? sstable.openDataReader()
                    : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.scrubInfo = new ScrubInfo(dataFile, sstable);
}
 
Developer: pgaref, Project: ACaZoo, Lines of code: 30, Source: Scrubber.java

Example 11: buildSerializeAndIterate

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
@Test
public void buildSerializeAndIterate() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();

    final File treeFile = File.createTempFile("token-tree-iterate-test1", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();

    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    final TokenTree tokenTree = new TokenTree(new MappedBuffer(reader));

    final Iterator<Token> tokenIterator = tokenTree.iterator(KEY_CONVERTER);
    final Iterator<Map.Entry<Long, LongSet>> listIterator = simpleTokenMap.entrySet().iterator();
    while (tokenIterator.hasNext() && listIterator.hasNext())
    {
        Token treeNext = tokenIterator.next();
        Map.Entry<Long, LongSet> listNext = listIterator.next();

        Assert.assertEquals(listNext.getKey(), treeNext.get());
        Assert.assertEquals(convert(listNext.getValue()), convert(treeNext));
    }

    Assert.assertFalse("token iterator not finished", tokenIterator.hasNext());
    Assert.assertFalse("list iterator not finished", listIterator.hasNext());

    reader.close();
}
 
Developer: xedin, Project: sasi, Lines of code: 32, Source: TokenTreeTest.java

Example 12: generateTree

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
private static TokenTree generateTree(final long minToken, final long maxToken) throws IOException
{
    final SortedMap<Long, LongSet> toks = new TreeMap<Long, LongSet>()
    {{
            for (long i = minToken; i <= maxToken; i++)
            {
                LongSet offsetSet = new LongOpenHashSet();
                offsetSet.add(i);
                put(i, offsetSet);
            }
    }};

    final TokenTreeBuilder builder = new TokenTreeBuilder(toks).finish();
    final File treeFile = File.createTempFile("token-tree-get-test", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();

    RandomAccessReader reader = null;

    try
    {
        reader = RandomAccessReader.open(treeFile);
        return new TokenTree(new MappedBuffer(reader));
    }
    finally
    {
        FileUtils.closeQuietly(reader);
    }
}
 
Developer: xedin, Project: sasi, Lines of code: 33, Source: TokenTreeTest.java

Example 13: Verifier

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public Verifier(ColumnFamilyStore cfs, SSTableReader sstable, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;
    this.rowIndexEntrySerializer = sstable.descriptor.version.getSSTableFormat().getIndexSerializer(sstable.metadata, sstable.descriptor.version, sstable.header);

    this.controller = new VerifyController(cfs);

    this.dataFile = isOffline
                    ? sstable.openDataReader()
                    : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.verifyInfo = new VerifyInfo(dataFile, sstable);
}
 
Developer: scylladb, Project: scylla-tools-java, Lines of code: 16, Source: Verifier.java

Example 14: testOldReadsNew

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public void testOldReadsNew(String oldV, String newV) throws IOException
{
    Map<MetadataType, MetadataComponent> originalMetadata = constructMetadata();

    MetadataSerializer serializer = new MetadataSerializer();
    // Write metadata in two minor formats.
    File statsFileLb = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(newV));
    File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(oldV));
    // Reading both as earlier version should yield identical results.
    Descriptor desc = new Descriptor(oldV, statsFileLb.getParentFile(), "", "", 0, DatabaseDescriptor.getSSTableFormat());
    try (RandomAccessReader inLb = RandomAccessReader.open(statsFileLb);
         RandomAccessReader inLa = RandomAccessReader.open(statsFileLa))
    {
        Map<MetadataType, MetadataComponent> deserializedLb = serializer.deserialize(desc, inLb, EnumSet.allOf(MetadataType.class));
        Map<MetadataType, MetadataComponent> deserializedLa = serializer.deserialize(desc, inLa, EnumSet.allOf(MetadataType.class));

        for (MetadataType type : MetadataType.values())
        {
            assertEquals(deserializedLa.get(type), deserializedLb.get(type));
            if (!originalMetadata.get(type).equals(deserializedLb.get(type)))
            {
                // Currently only STATS can be different. Change if no longer the case
                assertEquals(MetadataType.STATS, type);
            }
        }
    }
}
 
Developer: scylladb, Project: scylla-tools-java, Lines of code: 28, Source: MetadataSerializerTest.java

Example 15: Scrubber

import org.apache.cassandra.io.util.RandomAccessReader; // import the package/class this method depends on
public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;
    this.skipCorrupted = skipCorrupted;

    // Calculate the expected compacted filesize
    this.destination = cfs.directories.getDirectoryForCompactedSSTables();
    if (destination == null)
        throw new IOException("disk full");

    List<SSTableReader> toScrub = Collections.singletonList(sstable);
    // If we run scrub offline, we should never purge tombstone, as we cannot know if other sstable have data that the tombstone deletes.
    this.controller = isOffline
                    ? new ScrubController(cfs)
                    : new CompactionController(cfs, Collections.singleton(sstable), CompactionManager.getDefaultGcBefore(cfs));
    this.isCommutative = cfs.metadata.isCounter();
    this.expectedBloomFilterSize = Math.max(cfs.metadata.getMinIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(toScrub)));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    this.dataFile = isOffline
                    ? sstable.openDataReader()
                    : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.scrubInfo = new ScrubInfo(dataFile, sstable);
}
 
Developer: rajath26, Project: cassandra-trunk, Lines of code: 31, Source: Scrubber.java


Note: The org.apache.cassandra.io.util.RandomAccessReader.open examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.