本文整理汇总了Java中org.apache.cassandra.io.util.RandomAccessReader类的典型用法代码示例。如果您正苦于以下问题:Java RandomAccessReader类的具体用法?Java RandomAccessReader怎么用?Java RandomAccessReader使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
RandomAccessReader类属于org.apache.cassandra.io.util包,在下文中一共展示了RandomAccessReader类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: estimateRowsFromIndex
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/** @return An estimate of the number of keys contained in the given index file. */
/**
 * Estimates the total number of keys in an index file by sampling the average
 * on-disk size of the first entries and extrapolating over the file length.
 *
 * @param ifile reader positioned at the start of the index file; rewound to 0 on return
 * @return an estimate of the number of keys contained in the given index file
 * @throws IOException if reading the index file fails
 */
protected long estimateRowsFromIndex(RandomAccessReader ifile) throws IOException
{
    // collect sizes for the first 10000 keys, or first 10 megabytes of data
    final int SAMPLES_CAP = 10000, BYTES_CAP = (int)Math.min(10000000, ifile.length());
    int keys = 0;
    while (ifile.getFilePointer() < BYTES_CAP && keys < SAMPLES_CAP)
    {
        ByteBufferUtil.skipShortLength(ifile);
        RowIndexEntry.Serializer.skip(ifile, descriptor.version);
        keys++;
    }
    assert keys > 0 && ifile.getFilePointer() > 0 && ifile.length() > 0 : "Unexpected empty index file: " + ifile;
    // Multiply before dividing: length / (pointer / keys) would throw
    // ArithmeticException when pointer < keys, and the truncated integer
    // average would inflate the estimate for very small entries.
    long estimatedRows = ifile.length() * keys / ifile.getFilePointer();
    ifile.seek(0);
    return estimatedRows;
}
示例2: deserialize
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Loads the requested metadata components from the sstable's STATS file.
 * When the STATS component is missing, falls back to a map containing only
 * the default stats metadata.
 *
 * @param descriptor identifies the sstable whose metadata to load
 * @param types      the metadata component types to deserialize
 * @return a map of the deserialized (or default) metadata components
 * @throws IOException if reading the stats file fails
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    logger.trace("Load metadata for {}", descriptor);
    String statsFile = descriptor.filenameFor(Component.STATS);
    if (!HadoopFileUtils.exists(statsFile, descriptor.getConfiguration()))
    {
        // No stats component on disk: substitute the default stats metadata.
        logger.trace("No sstable stats for {}", descriptor);
        Map<MetadataType, MetadataComponent> defaults = Maps.newHashMap();
        defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
        return defaults;
    }
    try (RandomAccessReader r = RandomAccessReader.open(statsFile, descriptor.getConfiguration()))
    {
        return deserialize(descriptor, r, types);
    }
}
示例3: put
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Returns a reader to the cache bucket for the given key, or deallocates it
 * when caching is not possible (memory budget exceeded, or the bucket was
 * evicted). Accounts the reader's buffer size against {@code memoryUsage}.
 *
 * @param cacheKey key identifying the bucket the reader belongs to
 * @param instance the reader being returned; ownership transfers to the cache
 *                 (or it is deallocated here)
 */
public void put(CacheKey cacheKey, RandomAccessReader instance)
{
    int memoryUsed = memoryUsage.get();
    if (logger.isDebugEnabled())
        logger.debug("Estimated memory usage is {} compared to actual usage {}", memoryUsed, sizeInBytes());
    CacheBucket bucket = cache.getIfPresent(cacheKey);
    // Over the memory budget, or no live bucket to return the reader to:
    // release its buffers instead of caching it.
    if (memoryUsed >= MEMORY_USAGE_THRESHOLD || bucket == null)
    {
        instance.deallocate();
    }
    else
    {
        memoryUsage.addAndGet(instance.getTotalBufferSize());
        bucket.queue.add(instance);
        // The bucket may have been discarded concurrently between getIfPresent
        // and the add above; drain one reader back out so nothing is stranded
        // in a dead bucket. NOTE(review): this relies on every racing writer
        // performing the same drain — confirm against CacheBucket's contract.
        if (bucket.discarded)
        {
            RandomAccessReader reader = bucket.queue.poll();
            if (reader != null)
            {
                memoryUsage.addAndGet(-1 * reader.getTotalBufferSize());
                reader.deallocate();
            }
        }
    }
}
示例4: buildSummaryAtLevel
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Rebuilds the index summary at the given sampling level by scanning the
 * primary index from the beginning.
 *
 * @param newSamplingLevel the sampling level for the rebuilt summary
 * @return the freshly built index summary
 * @throws IOException if reading the primary index fails
 */
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader index = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long end = index.length();
        try (IndexSummaryBuilder builder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel))
        {
            // Record each key's starting offset before reading it, until the
            // reader reaches the end of the index file.
            for (long position = index.getFilePointer(); position != end; position = index.getFilePointer())
            {
                builder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(index)), position);
                RowIndexEntry.Serializer.skip(index);
            }
            return builder.build(partitioner);
        }
    }
    finally
    {
        FileUtils.closeQuietly(index);
    }
}
示例5: estimateRowsFromIndex
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/** @return An estimate of the number of keys contained in the given index file. */
/**
 * Estimates the total number of keys in an index file by sampling the average
 * on-disk size of the first entries and extrapolating over the file length.
 *
 * @param ifile reader positioned at the start of the index file; rewound to 0 on return
 * @return an estimate of the number of keys contained in the given index file
 * @throws IOException if reading the index file fails
 */
long estimateRowsFromIndex(RandomAccessReader ifile) throws IOException
{
    // collect sizes for the first 10000 keys, or first 10 megabytes of data
    final int SAMPLES_CAP = 10000, BYTES_CAP = (int)Math.min(10000000, ifile.length());
    int keys = 0;
    while (ifile.getFilePointer() < BYTES_CAP && keys < SAMPLES_CAP)
    {
        ByteBufferUtil.skipShortLength(ifile);
        RowIndexEntry.Serializer.skip(ifile);
        keys++;
    }
    assert keys > 0 && ifile.getFilePointer() > 0 && ifile.length() > 0 : "Unexpected empty index file: " + ifile;
    // Multiply before dividing: length / (pointer / keys) would throw
    // ArithmeticException when pointer < keys, and the truncated integer
    // average would inflate the estimate for very small entries.
    long estimatedRows = ifile.length() * keys / ifile.getFilePointer();
    ifile.seek(0);
    return estimatedRows;
}
示例6: deserialize
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Loads the requested metadata components from the sstable's STATS file.
 * When the STATS component is missing, falls back to a map containing only
 * the default stats metadata.
 *
 * @param descriptor identifies the sstable whose metadata to load
 * @param types      the metadata component types to deserialize
 * @return a map of the deserialized (or default) metadata components
 * @throws IOException if reading the stats file fails
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    logger.debug("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        // No stats component on disk: substitute the default stats metadata.
        logger.debug("No sstable stats for {}", descriptor);
        Map<MetadataType, MetadataComponent> defaults = Maps.newHashMap();
        defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
        return defaults;
    }
    try (RandomAccessReader r = RandomAccessReader.open(statsFile))
    {
        return deserialize(descriptor, r, types);
    }
}
示例7: FileCacheService
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Builds the reader cache. Queues of pooled readers expire after a period of
 * no access; on eviction every reader still queued is deallocated so its
 * buffers are released.
 */
protected FileCacheService()
{
    // Deallocate all pooled readers when their queue is evicted from the cache.
    RemovalListener<String, Queue<RandomAccessReader>> evictionHandler =
        new RemovalListener<String, Queue<RandomAccessReader>>()
        {
            @Override
            public void onRemoval(RemovalNotification<String, Queue<RandomAccessReader>> notification)
            {
                Queue<RandomAccessReader> evicted = notification.getValue();
                if (evicted == null)
                    return;
                for (RandomAccessReader reader : evicted)
                    reader.deallocate();
            }
        };

    cache = CacheBuilder.<String, Queue<RandomAccessReader>>newBuilder()
                        .expireAfterAccess(AFTER_ACCESS_EXPIRATION, TimeUnit.MILLISECONDS)
                        .concurrencyLevel(DatabaseDescriptor.getConcurrentReaders())
                        .removalListener(evictionHandler)
                        .build();
}
示例8: estimateRowsFromIndex
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/** @return An estimate of the number of keys contained in the given index file. */
/**
 * Estimates the total number of keys in an index file by sampling the average
 * on-disk size of the first entries and extrapolating over the file length.
 *
 * @param ifile reader positioned at the start of the index file; rewound to 0 on return
 * @return an estimate of the number of keys contained in the given index file
 * @throws IOException if reading the index file fails
 */
long estimateRowsFromIndex(RandomAccessReader ifile) throws IOException
{
    // collect sizes for the first 10000 keys, or first 10 megabytes of data
    final int SAMPLES_CAP = 10000, BYTES_CAP = (int)Math.min(10000000, ifile.length());
    int keys = 0;
    while (ifile.getFilePointer() < BYTES_CAP && keys < SAMPLES_CAP)
    {
        ByteBufferUtil.skipShortLength(ifile);
        RowIndexEntry.serializer.skip(ifile);
        keys++;
    }
    assert keys > 0 && ifile.getFilePointer() > 0 && ifile.length() > 0 : "Unexpected empty index file: " + ifile;
    // Multiply before dividing: length / (pointer / keys) would throw
    // ArithmeticException when pointer < keys, and the truncated integer
    // average would inflate the estimate for very small entries.
    long estimatedRows = ifile.length() * keys / ifile.getFilePointer();
    ifile.seek(0);
    return estimatedRows;
}
示例9: testSerializedSize
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
@Test
public void testSerializedSize() throws Exception
{
final TokenTreeBuilder builder = new TokenTreeBuilder(tokens).finish();
final File treeFile = File.createTempFile("token-tree-size-test", "tt");
treeFile.deleteOnExit();
final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
builder.write(writer);
writer.close();
final RandomAccessReader reader = RandomAccessReader.open(treeFile);
Assert.assertEquals((int) reader.bytesRemaining(), builder.serializedSize());
}
示例10: skipPastEnd
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Verifies that skipping past the last token in the tree does not throw.
 */
@Test
public void skipPastEnd() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();
    final File treeFile = File.createTempFile("token-tree-skip-past-test", "tt");
    treeFile.deleteOnExit();

    // try-with-resources: the original leaked the writer if write() threw.
    try (SequentialWriter writer = new SequentialWriter(treeFile, 4096, false))
    {
        builder.write(writer);
    }

    // NOTE(review): the reader is intentionally left open — MappedBuffer
    // appears to take ownership of it; confirm whether it closes the reader.
    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    final RangeIterator<Long, Token> tokenTree = new TokenTree(new MappedBuffer(reader)).iterator(KEY_CONVERTER);
    // Skip beyond the largest key; must complete without throwing.
    tokenTree.skipTo(simpleTokenMap.lastKey() + 10);
}
示例11: SSTableIdentityIterator
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
* Used to iterate through the columns of a row.
* @param sstable SSTable we are reading ffrom.
* @param file Reading using this file.
* @param key Key of this row.
*/
public SSTableIdentityIterator(SSTableReader sstable, RandomAccessReader file, DecoratedKey key)
{
this.sstable = sstable;
this.filename = file.getPath();
this.key = key;
try
{
this.partitionLevelDeletion = DeletionTime.serializer.deserialize(file);
SerializationHelper helper = new SerializationHelper(sstable.metadata, sstable.descriptor.version.correspondingMessagingVersion(), SerializationHelper.Flag.LOCAL);
this.iterator = SSTableSimpleIterator.create(sstable.metadata, file, sstable.header, helper, partitionLevelDeletion);
this.staticRow = iterator.readStaticRow();
}
catch (IOException e)
{
sstable.markSuspect();
throw new CorruptSSTableException(e, filename);
}
}
示例12: deserialize
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Loads the requested metadata components from the sstable's STATS file.
 * When the STATS component is missing, falls back to a map containing only
 * the default stats metadata.
 *
 * @param descriptor identifies the sstable whose metadata to load
 * @param types      the metadata component types to deserialize
 * @return a map of the deserialized (or default) metadata components
 * @throws IOException if reading the stats file fails
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
{
    logger.trace("Load metadata for {}", descriptor);
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists())
    {
        // No stats component on disk: substitute the default stats metadata.
        logger.trace("No sstable stats for {}", descriptor);
        Map<MetadataType, MetadataComponent> defaults = Maps.newHashMap();
        defaults.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
        return defaults;
    }
    try (RandomAccessReader r = RandomAccessReader.open(statsFile))
    {
        return deserialize(descriptor, r, types);
    }
}
示例13: testGetPut
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Verifies the basic get/put cycle of the buffer pool: a taken buffer is
 * direct and correctly sized, and returning it releases the current chunk
 * while the macro chunk stays allocated.
 */
@Test
public void testGetPut() throws InterruptedException
{
    final int bufferSize = RandomAccessReader.DEFAULT_BUFFER_SIZE;

    // Taking a buffer must yield a direct buffer of the requested capacity.
    ByteBuffer allocated = BufferPool.get(bufferSize);
    assertNotNull(allocated);
    assertEquals(bufferSize, allocated.capacity());
    assertEquals(true, allocated.isDirect());

    // A current chunk must exist, carved out of the global macro chunk.
    BufferPool.Chunk activeChunk = BufferPool.currentChunk();
    assertNotNull(activeChunk);
    assertEquals(BufferPool.GlobalPool.MACRO_CHUNK_SIZE, BufferPool.sizeInBytes());

    // Returning the buffer clears the current chunk, but the macro chunk
    // remains allocated in the global pool.
    BufferPool.put(allocated);
    assertEquals(null, BufferPool.currentChunk());
    assertEquals(BufferPool.GlobalPool.MACRO_CHUNK_SIZE, BufferPool.sizeInBytes());
}
示例14: testSerialization
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Round-trips a full set of metadata components through the serializer and
 * asserts every component type deserializes equal to the original.
 */
@Test
public void testSerialization() throws IOException
{
    Map<MetadataType, MetadataComponent> originalMetadata = constructMetadata();
    MetadataSerializer serializer = new MetadataSerializer();
    File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);

    Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0);
    try (RandomAccessReader in = RandomAccessReader.open(statsFile))
    {
        Map<MetadataType, MetadataComponent> roundTripped = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));
        // Every component type must survive the round trip unchanged.
        for (MetadataType type : MetadataType.values())
            assertEquals(originalMetadata.get(type), roundTripped.get(type));
    }
}
示例15: buildSummaryAtLevel
import org.apache.cassandra.io.util.RandomAccessReader; //导入依赖的package包/类
/**
 * Rebuilds the index summary at the given sampling level by scanning the
 * primary index from the beginning.
 *
 * @param newSamplingLevel the sampling level for the rebuilt summary
 * @return the freshly built index summary
 * @throws IOException if reading the primary index fails
 */
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader index = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long end = index.length();
        IndexSummaryBuilder builder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel);
        // Record each key's starting offset before reading it, until the
        // reader reaches the end of the index file.
        for (long position = index.getFilePointer(); position != end; position = index.getFilePointer())
        {
            builder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(index)), position);
            RowIndexEntry.Serializer.skip(index);
        }
        return builder.build(partitioner);
    }
    finally
    {
        FileUtils.closeQuietly(index);
    }
}