This article collects typical usage examples of the Java method org.apache.cassandra.io.util.RandomAccessReader.open. If you are unsure what RandomAccessReader.open does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also explore further usage examples of its declaring class, org.apache.cassandra.io.util.RandomAccessReader.
The following section shows 15 code examples of the RandomAccessReader.open method, sorted by popularity by default.
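Before the examples, here is a minimal sketch of the pattern most of them share: open a reader over an on-disk file, use it, and let try-with-resources close it. This is only an illustrative sketch, not code taken from the examples; the class name and the sstableStats path are made up, the file is assumed to already exist, and only calls that appear in the examples below are used (open, length, bytesRemaining).

import java.io.File;
import org.apache.cassandra.io.util.RandomAccessReader;

public class RandomAccessReaderOpenSketch
{
    public static void main(String[] args) throws Exception
    {
        // hypothetical path; the examples below usually derive it from descriptor.filenameFor(Component.STATS)
        File sstableStats = new File("/tmp/example-Statistics.db");

        // try-with-resources guarantees the reader is closed even if reading fails
        try (RandomAccessReader reader = RandomAccessReader.open(sstableStats))
        {
            System.out.println("file length:     " + reader.length());
            System.out.println("bytes remaining: " + reader.bytesRemaining());
        }
    }
}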
Example 1: deserialize
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components;
    logger.trace("Load metadata for {}", descriptor);
    String statsFile = descriptor.filenameFor(Component.STATS);
    if (!HadoopFileUtils.exists(statsFile, descriptor.getConfiguration()))
    {
        logger.trace("No sstable stats for {}", descriptor);
        components = Maps.newHashMap();
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile, descriptor.getConfiguration()))
        {
            components = deserialize(descriptor, r, types);
        }
    }
    return components;
}
Example 2: buildSummaryAtLevel
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long indexSize = primaryIndex.length();
        try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel))
        {
            long indexPosition;
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
                RowIndexEntry.Serializer.skip(primaryIndex);
            }
            return summaryBuilder.build(partitioner);
        }
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }
}
Example 3: deserialize
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components;
    logger.debug("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        logger.debug("No sstable stats for {}", descriptor);
        components = Maps.newHashMap();
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile))
        {
            components = deserialize(descriptor, r, types);
        }
    }
    return components;
}
Example 4: testSerializedSize
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

@Test
public void testSerializedSize() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(tokens).finish();
    final File treeFile = File.createTempFile("token-tree-size-test", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();

    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    Assert.assertEquals((int) reader.bytesRemaining(), builder.serializedSize());
}
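The test above never closes the reader it opens. A variant of the same size check that releases the file handle, assuming the same builder and treeFile setup, could look like this:

try (RandomAccessReader reader = RandomAccessReader.open(treeFile))
{
    Assert.assertEquals((int) reader.bytesRemaining(), builder.serializedSize());
}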
Example 5: skipPastEnd
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

@Test
public void skipPastEnd() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();
    final File treeFile = File.createTempFile("token-tree-skip-past-test", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();

    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    final RangeIterator<Long, Token> tokenTree = new TokenTree(new MappedBuffer(reader)).iterator(KEY_CONVERTER);

    // the only check here is that skipping past the last token does not throw
    tokenTree.skipTo(simpleTokenMap.lastKey() + 10);
}
Example 6: deserialize
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components;
    logger.trace("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        logger.trace("No sstable stats for {}", descriptor);
        components = Maps.newHashMap();
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    }
    else
    {
        try (RandomAccessReader r = RandomAccessReader.open(statsFile))
        {
            components = deserialize(descriptor, r, types);
        }
    }
    return components;
}
Example 7: testSerialization
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

@Test
public void testSerialization() throws IOException
{
    Map<MetadataType, MetadataComponent> originalMetadata = constructMetadata();

    MetadataSerializer serializer = new MetadataSerializer();
    File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);

    Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0);
    try (RandomAccessReader in = RandomAccessReader.open(statsFile))
    {
        Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));

        for (MetadataType type : MetadataType.values())
        {
            assertEquals(originalMetadata.get(type), deserialized.get(type));
        }
    }
}
Example 8: buildSummaryAtLevel
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long indexSize = primaryIndex.length();
        IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel);

        long indexPosition;
        while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
        {
            summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
            RowIndexEntry.Serializer.skip(primaryIndex);
        }

        return summaryBuilder.build(partitioner);
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }
}
Example 9: Scrubber
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;
    this.skipCorrupted = skipCorrupted;
    this.isOffline = isOffline;

    List<SSTableReader> toScrub = Collections.singletonList(sstable);

    // Calculate the expected compacted filesize
    this.destination = cfs.directories.getWriteableLocationAsFile(cfs.getExpectedCompactedFileSize(toScrub, OperationType.SCRUB));
    if (destination == null)
        throw new IOException("disk full");

    // If we run scrub offline, we should never purge tombstones, as we cannot know if other sstables have data that the tombstones delete.
    this.controller = isOffline
                    ? new ScrubController(cfs)
                    : new CompactionController(cfs, Collections.singleton(sstable), CompactionManager.getDefaultGcBefore(cfs));
    this.isCommutative = cfs.metadata.isCounter();
    this.expectedBloomFilterSize = Math.max(cfs.metadata.getMinIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(toScrub)));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    this.dataFile = isOffline
                  ? sstable.openDataReader()
                  : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.scrubInfo = new ScrubInfo(dataFile, sstable);
}
Example 10: Scrubber
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;

    // Calculate the expected compacted filesize
    this.destination = cfs.directories.getDirectoryForNewSSTables();
    if (destination == null)
        throw new IOException("disk full");

    List<SSTableReader> toScrub = Collections.singletonList(sstable);
    // If we run scrub offline, we should never purge tombstones, as we cannot know if other sstables have data that the tombstones delete.
    this.controller = isOffline
                    ? new ScrubController(cfs)
                    : new CompactionController(cfs, Collections.singleton(sstable), CompactionManager.getDefaultGcBefore(cfs));
    this.isCommutative = cfs.metadata.getDefaultValidator().isCommutative();
    this.expectedBloomFilterSize = Math.max(cfs.metadata.getIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(toScrub, cfs.metadata)));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    this.dataFile = isOffline
                  ? sstable.openDataReader()
                  : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.scrubInfo = new ScrubInfo(dataFile, sstable);
}
Example 11: buildSerializeAndIterate
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

@Test
public void buildSerializeAndIterate() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();
    final File treeFile = File.createTempFile("token-tree-iterate-test1", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();

    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    final TokenTree tokenTree = new TokenTree(new MappedBuffer(reader));

    final Iterator<Token> tokenIterator = tokenTree.iterator(KEY_CONVERTER);
    final Iterator<Map.Entry<Long, LongSet>> listIterator = simpleTokenMap.entrySet().iterator();
    while (tokenIterator.hasNext() && listIterator.hasNext())
    {
        Token treeNext = tokenIterator.next();
        Map.Entry<Long, LongSet> listNext = listIterator.next();

        Assert.assertEquals(listNext.getKey(), treeNext.get());
        Assert.assertEquals(convert(listNext.getValue()), convert(treeNext));
    }

    Assert.assertFalse("token iterator not finished", tokenIterator.hasNext());
    Assert.assertFalse("list iterator not finished", listIterator.hasNext());

    reader.close();
}
Example 12: generateTree
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

private static TokenTree generateTree(final long minToken, final long maxToken) throws IOException
{
    final SortedMap<Long, LongSet> toks = new TreeMap<Long, LongSet>()
    {{
        for (long i = minToken; i <= maxToken; i++)
        {
            LongSet offsetSet = new LongOpenHashSet();
            offsetSet.add(i);
            put(i, offsetSet);
        }
    }};

    final TokenTreeBuilder builder = new TokenTreeBuilder(toks).finish();
    final File treeFile = File.createTempFile("token-tree-get-test", "tt");
    treeFile.deleteOnExit();

    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    builder.write(writer);
    writer.close();

    RandomAccessReader reader = null;

    try
    {
        reader = RandomAccessReader.open(treeFile);
        return new TokenTree(new MappedBuffer(reader));
    }
    finally
    {
        FileUtils.closeQuietly(reader);
    }
}
Example 13: Verifier
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public Verifier(ColumnFamilyStore cfs, SSTableReader sstable, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;
    this.rowIndexEntrySerializer = sstable.descriptor.version.getSSTableFormat().getIndexSerializer(sstable.metadata, sstable.descriptor.version, sstable.header);

    this.controller = new VerifyController(cfs);

    this.dataFile = isOffline
                  ? sstable.openDataReader()
                  : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.verifyInfo = new VerifyInfo(dataFile, sstable);
}
Example 14: testOldReadsNew
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public void testOldReadsNew(String oldV, String newV) throws IOException
{
    Map<MetadataType, MetadataComponent> originalMetadata = constructMetadata();

    MetadataSerializer serializer = new MetadataSerializer();
    // Write metadata in two minor formats.
    File statsFileLb = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(newV));
    File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(oldV));

    // Reading both as earlier version should yield identical results.
    Descriptor desc = new Descriptor(oldV, statsFileLb.getParentFile(), "", "", 0, DatabaseDescriptor.getSSTableFormat());
    try (RandomAccessReader inLb = RandomAccessReader.open(statsFileLb);
         RandomAccessReader inLa = RandomAccessReader.open(statsFileLa))
    {
        Map<MetadataType, MetadataComponent> deserializedLb = serializer.deserialize(desc, inLb, EnumSet.allOf(MetadataType.class));
        Map<MetadataType, MetadataComponent> deserializedLa = serializer.deserialize(desc, inLa, EnumSet.allOf(MetadataType.class));

        for (MetadataType type : MetadataType.values())
        {
            assertEquals(deserializedLa.get(type), deserializedLb.get(type));

            if (!originalMetadata.get(type).equals(deserializedLb.get(type)))
            {
                // Currently only STATS can be different. Change if no longer the case
                assertEquals(MetadataType.STATS, type);
            }
        }
    }
}
Example 15: Scrubber
import org.apache.cassandra.io.util.RandomAccessReader; // import the class the method depends on

public Scrubber(ColumnFamilyStore cfs, SSTableReader sstable, boolean skipCorrupted, OutputHandler outputHandler, boolean isOffline) throws IOException
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;
    this.skipCorrupted = skipCorrupted;

    // Calculate the expected compacted filesize
    this.destination = cfs.directories.getDirectoryForCompactedSSTables();
    if (destination == null)
        throw new IOException("disk full");

    List<SSTableReader> toScrub = Collections.singletonList(sstable);
    // If we run scrub offline, we should never purge tombstones, as we cannot know if other sstables have data that the tombstones delete.
    this.controller = isOffline
                    ? new ScrubController(cfs)
                    : new CompactionController(cfs, Collections.singleton(sstable), CompactionManager.getDefaultGcBefore(cfs));
    this.isCommutative = cfs.metadata.isCounter();
    this.expectedBloomFilterSize = Math.max(cfs.metadata.getMinIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(toScrub)));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    this.dataFile = isOffline
                  ? sstable.openDataReader()
                  : sstable.openDataReader(CompactionManager.instance.getRateLimiter());
    this.indexFile = RandomAccessReader.open(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)));
    this.scrubInfo = new ScrubInfo(dataFile, sstable);
}