本文整理汇总了Java中org.apache.cassandra.io.util.SegmentedFile.getBuilder方法的典型用法代码示例。如果您正苦于以下问题:Java SegmentedFile.getBuilder方法的具体用法?Java SegmentedFile.getBuilder怎么用?Java SegmentedFile.getBuilder使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.io.util.SegmentedFile
的用法示例。
在下文中一共展示了SegmentedFile.getBuilder方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: load
import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * @param saveSummaryIfCreated for bulk loading purposes, if the summary was absent and needed to be built, you can
 * avoid persisting it to disk by setting this to false
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
    // One builder per component: index is always plain; data may be compressed.
    SegmentedFile.Builder indexBuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dataBuilder;
    if (compression)
        dataBuilder = SegmentedFile.getCompressedBuilder();
    else
        dataBuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    boolean haveSummary = loadSummary(indexBuilder, dataBuilder);
    boolean mustBuild = recreateBloomFilter || !haveSummary;
    if (mustBuild)
        buildSummary(recreateBloomFilter, indexBuilder, dataBuilder, haveSummary, Downsampling.BASE_SAMPLING_LEVEL);

    // Materialize the segmented views over the on-disk components.
    ifile = indexBuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dataBuilder.complete(descriptor.filenameFor(Component.DATA));

    // Persist a freshly built summary unless the caller opted out.
    if (saveSummaryIfCreated && mustBuild)
        saveSummary(indexBuilder, dataBuilder);
}
示例2: SSTableWriter
import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Creates a writer for a new sstable, wiring up the data file (compressed or
 * checksummed plain) and the segmented-file builder that will back reads later.
 */
public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    this.repairedAt = repairedAt;
    this.sstableMetadataCollector = sstableMetadataCollector;
    iwriter = new IndexWriter(keyCount);
    if (!compression)
    {
        // Plain data file paired with a CRC component for integrity checks.
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    else
    {
        // Compressed writer also maintains the COMPRESSION_INFO component;
        // the builder must wrap the same writer to learn chunk boundaries.
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
}
示例3: IndexWriter
import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Sets up the primary-index writer together with the in-memory index summary
 * builder and the bloom filter sized for the expected key count.
 */
IndexWriter(long keyCount)
{
    String indexPath = descriptor.filenameFor(Component.PRIMARY_INDEX);
    indexFile = SequentialWriter.open(new File(indexPath));
    builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    // Bloom filter FP chance comes from the table's schema settings.
    bf = FilterFactory.getFilter(keyCount, metadata.getBloomFilterFpChance(), true);
    summary = new IndexSummaryBuilder(keyCount, metadata.getMinIndexInterval(), Downsampling.BASE_SAMPLING_LEVEL);
}
示例4: SSTableWriter
import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Legacy writer constructor: registers the standard sstable components and
 * opens a buffered random-access data file for writing.
 */
public SSTableWriter(String filename, long keyCount, CFMetaData metadata, IPartitioner partitioner, ReplayPosition replayPosition) throws IOException
{
    super(Descriptor.fromFilename(filename),
          new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS)),
          metadata,
          replayPosition,
          partitioner,
          SSTable.defaultRowHistogram(),
          SSTable.defaultColumnHistogram());
    // Data file first, then the segment builder that will map it, then the index writer.
    dataFile = new BufferedRandomAccessFile(new File(getFilename()), "rw", BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE, true);
    dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    iwriter = new IndexWriter(descriptor, partitioner, keyCount);
}
示例5: IndexWriter
import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
IndexWriter(Descriptor desc, IPartitioner part, long keyCount) throws IOException
{
this.desc = desc;
this.partitioner = part;
indexFile = new BufferedRandomAccessFile(new File(desc.filenameFor(SSTable.COMPONENT_INDEX)), "rw", 8 * 1024 * 1024, true);
builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
summary = new IndexSummary(keyCount);
bf = BloomFilter.getFilter(keyCount, 15);
}
示例6: load
import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Also detects and repairs a summary that was downsampled under a serialization
 * format that cannot represent downsampling (see CASSANDRA-8993).
 * @param recreateBloomFilter whether to rebuild the bloom filter from the index
 * @param saveSummaryIfCreated for bulk loading purposes, if the summary was absent and needed to be built, you can
 * avoid persisting it to disk by setting this to false
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
// Index component always uses a plain builder; data component may be compressed.
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
boolean summaryLoaded = loadSummary(ibuilder, dbuilder);
// Track whether we built the summary here so we know if it still needs persisting.
boolean builtSummary = false;
if (recreateBloomFilter || !summaryLoaded)
{
buildSummary(recreateBloomFilter, ibuilder, dbuilder, summaryLoaded, Downsampling.BASE_SAMPLING_LEVEL);
builtSummary = true;
}
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
// Check for an index summary that was downsampled even though the serialization format doesn't support
// that. If it was downsampled, rebuild it. See CASSANDRA-8993 for details.
if (!descriptor.version.hasSamplingLevel && !builtSummary && !validateSummarySamplingLevel())
{
// Release the bad summary and the completed files before rebuilding from scratch.
indexSummary.close();
ifile.close();
dfile.close();
logger.info("Detected erroneously downsampled index summary; will rebuild summary at full sampling");
// Remove the on-disk SUMMARY component so nothing can reload the bad copy.
FileUtils.deleteWithConfirm(new File(descriptor.filenameFor(Component.SUMMARY)));
// Fresh builders: the old ones were consumed by complete() above.
ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
dbuilder = compression
? SegmentedFile.getCompressedBuilder()
: SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
buildSummary(false, ibuilder, dbuilder, false, Downsampling.BASE_SAMPLING_LEVEL);
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
// Rebuilt summary is always persisted, regardless of saveSummaryIfCreated.
saveSummary(ibuilder, dbuilder);
}
else if (saveSummaryIfCreated && builtSummary)
{
saveSummary(ibuilder, dbuilder);
}
}
示例7: load
import org.apache.cassandra.io.util.SegmentedFile; //导入方法依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 * Performs a single sequential scan of the primary index, simultaneously building
 * the index summary, (optionally) the bloom filter, the key cache entries requested
 * by the caller, and the segment boundaries for both component files.
 * @param recreatebloom whether to rebuild the bloom filter from the index entries
 * @param keysToLoadInCache keys whose data positions should be pre-populated in the key cache
 */
private void load(boolean recreatebloom, Set<DecoratedKey> keysToLoadInCache) throws IOException
{
boolean cacheLoading = keyCache != null && !keysToLoadInCache.isEmpty();
SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
SegmentedFile.Builder dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
// we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
BufferedRandomAccessFile input = new BufferedRandomAccessFile(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)),
"r",
BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE,
true);
try
{
// Grow the key cache up-front if the requested keys would not fit.
if (keyCache != null && keyCache.getCapacity() - keyCache.size() < keysToLoadInCache.size())
keyCache.updateCapacity(keyCache.size() + keysToLoadInCache.size());
long indexSize = input.length();
long estimatedKeys = SSTable.estimateRowsFromIndex(input);
indexSummary = new IndexSummary(estimatedKeys);
if (recreatebloom)
// estimate key count based on index length
bf = LegacyBloomFilter.getFilter(estimatedKeys, 15);
// Walk every index entry: (short-length key, long data position) pairs until EOF.
while (true)
{
long indexPosition = input.getFilePointer();
if (indexPosition == indexSize)
break;
boolean shouldAddEntry = indexSummary.shouldAddEntry();
// Only materialize the key bytes when someone needs them; otherwise just skip past.
ByteBuffer key = (shouldAddEntry || cacheLoading || recreatebloom)
? ByteBufferUtil.readWithShortLength(input)
: ByteBufferUtil.skipShortLength(input);
long dataPosition = input.readLong();
if (key != null)
{
DecoratedKey decoratedKey = decodeKey(partitioner, descriptor, key);
if (recreatebloom)
bf.add(decoratedKey.key);
if (shouldAddEntry)
indexSummary.addEntry(decoratedKey, indexPosition);
if (cacheLoading && keysToLoadInCache.contains(decoratedKey))
cacheKey(decoratedKey, dataPosition);
}
indexSummary.incrementRowid();
// Record candidate segment boundaries in both component files at this entry.
ibuilder.addPotentialBoundary(indexPosition);
dbuilder.addPotentialBoundary(dataPosition);
}
indexSummary.complete();
}
finally
{
FileUtils.closeQuietly(input);
}
// finalize the state of the reader
ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));
}