本文整理汇总了Java中org.apache.cassandra.io.util.SegmentedFile类的典型用法代码示例。如果您正苦于以下问题:Java SegmentedFile类的具体用法?Java SegmentedFile怎么用?Java SegmentedFile使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
SegmentedFile类属于org.apache.cassandra.io.util包,在下文中一共展示了SegmentedFile类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: internalOpen
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Creates an SSTableReader around components whose state was already fully
 * initialized by an SSTableWriter; no disk I/O happens here.
 *
 * @param openReason why this reader is being opened (normal open, early open, ...)
 * @return a reader wrapping the supplied, pre-built components
 */
static SSTableReader internalOpen(Descriptor desc,
                                  Set<Component> components,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  SegmentedFile ifile,
                                  SegmentedFile dfile,
                                  IndexSummary isummary,
                                  IFilter bf,
                                  long maxDataAge,
                                  StatsMetadata sstableMetadata,
                                  OpenReason openReason)
{
    // Fail fast on any missing component; the reader cannot function without them.
    assert desc != null;
    assert partitioner != null;
    assert ifile != null;
    assert dfile != null;
    assert isummary != null;
    assert bf != null;
    assert sstableMetadata != null;

    return new SSTableReader(desc, components, metadata, partitioner,
                             ifile, dfile, isummary, bf,
                             maxDataAge, sstableMetadata, openReason);
}
示例2: SSTableReader
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Full constructor: delegates the shared initialization to the base constructor,
 * then wires up the file handles and in-memory index structures supplied by the
 * writer, and finally runs the reader's setup step.
 */
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      IFilter bloomFilter,
                      long maxDataAge,
                      StatsMetadata sstableMetadata,
                      OpenReason openReason)
{
    this(desc, components, metadata, partitioner, maxDataAge, sstableMetadata, openReason);
    this.bf = bloomFilter;
    this.indexSummary = indexSummary;
    this.dfile = dfile;
    this.ifile = ifile;
    // NOTE(review): setup(false) appears to finalize reader state; kept last so it
    // observes fully-populated fields — confirm against the setup implementation.
    this.setup(false);
}
示例3: internalOpen
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Creates an SSTableReader around components whose state was already fully
 * initialized by an SSTableWriter; no disk I/O happens here.
 *
 * @param isOpenEarly true when the reader is opened before the writer has
 *                    completely finished (incremental open)
 * @return a reader wrapping the supplied, pre-built components
 */
static SSTableReader internalOpen(Descriptor desc,
                                  Set<Component> components,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  SegmentedFile ifile,
                                  SegmentedFile dfile,
                                  IndexSummary isummary,
                                  IFilter bf,
                                  long maxDataAge,
                                  StatsMetadata sstableMetadata,
                                  boolean isOpenEarly)
{
    // Fail fast on any missing component; the reader cannot function without them.
    assert desc != null;
    assert partitioner != null;
    assert ifile != null;
    assert dfile != null;
    assert isummary != null;
    assert bf != null;
    assert sstableMetadata != null;

    return new SSTableReader(desc, components, metadata, partitioner,
                             ifile, dfile, isummary, bf,
                             maxDataAge, sstableMetadata, isOpenEarly);
}
示例4: SSTableReader
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Full constructor: delegates the shared initialization to the base constructor,
 * then wires up the file handles and in-memory index structures supplied by the
 * writer.
 */
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      IFilter bloomFilter,
                      long maxDataAge,
                      StatsMetadata sstableMetadata,
                      boolean isOpenEarly)
{
    this(desc, components, metadata, partitioner, maxDataAge, sstableMetadata, isOpenEarly);
    this.bf = bloomFilter;
    this.indexSummary = indexSummary;
    this.dfile = dfile;
    this.ifile = ifile;
}
示例5: load
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Loads ifile, dfile and indexSummary, and optionally recreates the bloom filter.
 *
 * @param recreateBloomFilter  true to rebuild the bloom filter from the index
 * @param saveSummaryIfCreated for bulk loading purposes, if the summary was absent
 *                             and needed to be built, you can avoid persisting it
 *                             to disk by setting this to false
 * @throws IOException if the index/data files cannot be read
 */
private void load(boolean recreateBloomFilter, boolean saveSummaryIfCreated) throws IOException
{
    // The index file always uses the configured index access mode; the data file
    // builder depends on whether this table is compressed.
    SegmentedFile.Builder ibuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    SegmentedFile.Builder dbuilder;
    if (compression)
        dbuilder = SegmentedFile.getCompressedBuilder();
    else
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());

    boolean summaryLoaded = loadSummary(ibuilder, dbuilder);
    // Rebuild when explicitly requested, or when no usable summary was on disk.
    boolean builtSummary = recreateBloomFilter || !summaryLoaded;
    if (builtSummary)
        buildSummary(recreateBloomFilter, ibuilder, dbuilder, summaryLoaded, Downsampling.BASE_SAMPLING_LEVEL);

    ifile = ibuilder.complete(descriptor.filenameFor(Component.PRIMARY_INDEX));
    dfile = dbuilder.complete(descriptor.filenameFor(Component.DATA));

    // Persist the freshly built summary unless the caller opted out (bulk load).
    if (saveSummaryIfCreated && builtSummary)
        saveSummary(ibuilder, dbuilder);
}
示例6: internalOpen
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Creates an SSTableReader around components whose state was already fully
 * initialized by an SSTableWriter; no disk I/O happens here.
 *
 * @return a reader wrapping the supplied, pre-built components
 */
static SSTableReader internalOpen(Descriptor desc,
                                  Set<Component> components,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  SegmentedFile ifile,
                                  SegmentedFile dfile,
                                  IndexSummary isummary,
                                  IFilter bf,
                                  long maxDataAge,
                                  StatsMetadata sstableMetadata)
{
    // Fail fast on any missing component; the reader cannot function without them.
    assert desc != null;
    assert partitioner != null;
    assert ifile != null;
    assert dfile != null;
    assert isummary != null;
    assert bf != null;
    assert sstableMetadata != null;

    return new SSTableReader(desc, components, metadata, partitioner,
                             ifile, dfile, isummary, bf,
                             maxDataAge, sstableMetadata);
}
示例7: SSTableReader
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Full constructor: delegates the shared initialization to the base constructor,
 * then wires up the file handles and in-memory index structures supplied by the
 * writer.
 */
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      IFilter bloomFilter,
                      long maxDataAge,
                      StatsMetadata sstableMetadata)
{
    this(desc, components, metadata, partitioner, maxDataAge, sstableMetadata);
    this.bf = bloomFilter;
    this.indexSummary = indexSummary;
    this.dfile = dfile;
    this.ifile = ifile;
}
示例8: closeAndOpenReader
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Finalizes this writer's on-disk artifacts and opens them as a live SSTableReader:
 * closes the index/filter writer, trims the data file to its logical end, persists
 * sstable statistics, strips the temporary markers from all component files, and
 * builds the reader's segment maps. Statement order is significant throughout.
 *
 * @param maxDataAge upper bound on the age of the data in this sstable,
 *                   forwarded verbatim to the reader
 * @return the newly opened reader over the finalized components
 * @throws IOException if any of the close/truncate/rename steps fail
 */
public SSTableReader closeAndOpenReader(long maxDataAge) throws IOException
{
// index and filter
iwriter.close();
// main data: capture the logical end-of-data position BEFORE closing, then trim
// any preallocated slack beyond it
long position = dataFile.getFilePointer();
dataFile.close(); // calls force
FileUtils.truncate(dataFile.getPath(), position);
// write sstable statistics
writeMetadata(descriptor, estimatedRowSize, estimatedColumnCount, replayPosition);
// remove the 'tmp' marker from all components; must happen before building the
// reader's segment maps against the final filenames
final Descriptor newdesc = rename(descriptor, components);
// finalize in-memory state for the reader
SegmentedFile ifile = iwriter.builder.complete(newdesc.filenameFor(SSTable.COMPONENT_INDEX));
SegmentedFile dfile = dbuilder.complete(newdesc.filenameFor(SSTable.COMPONENT_DATA));
SSTableReader sstable = SSTableReader.internalOpen(newdesc, components, metadata, replayPosition, partitioner, ifile, dfile, iwriter.summary, iwriter.bf, maxDataAge, estimatedRowSize, estimatedColumnCount);
// null out so this writer cannot be reused; the reader now owns these resources
iwriter = null;
dbuilder = null;
return sstable;
}
示例9: SSTableReader
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Full constructor: hands the descriptor/metadata state to the superclass, then
 * wires up the file handles and in-memory index structures supplied by the writer.
 */
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      ReplayPosition replayPosition,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      Filter bloomFilter,
                      long maxDataAge,
                      EstimatedHistogram rowSizes,
                      EstimatedHistogram columnCounts)
throws IOException
{
    super(desc, components, metadata, replayPosition, partitioner, rowSizes, columnCounts);
    this.bf = bloomFilter;
    this.indexSummary = indexSummary;
    this.dfile = dfile;
    this.ifile = ifile;
    this.maxDataAge = maxDataAge;
}
示例10: loadSummary
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Load index summary from Summary.db file if it exists.
 *
 * if loaded index summary has different index interval from current value stored in schema,
 * then Summary.db file will be deleted and this returns false to rebuild summary.
 *
 * @param ibuilder index-file segment builder whose bounds are restored from the summary file
 * @param dbuilder data-file segment builder whose bounds are restored from the summary file
 * @return true if index summary is loaded successfully from Summary.db file.
 */
public boolean loadSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
indexSummary = IndexSummary.serializer.deserialize(
iStream, partitioner, descriptor.version.hasSamplingLevel,
metadata.getMinIndexInterval(), metadata.getMaxIndexInterval());
first = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
last = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
// Release the partially-deserialized summary AND clear the field, so no later
// code can observe a stale reference to a closed summary.
if (indexSummary != null)
{
indexSummary.close();
indexSummary = null;
}
logger.debug("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
FileUtils.closeQuietly(iStream);
// corrupted; delete it and fall back to creating a new summary
FileUtils.deleteWithConfirm(summariesFile);
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
示例11: SSTableWriter
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Constructs a writer for a brand-new sstable, opening the data file with either
 * the compressed or checksummed (CRC) sequential writer depending on the table's
 * compression settings.
 *
 * @param filename                 path the sstable components are derived from
 * @param keyCount                 expected number of keys, used to size the index writer
 * @param repairedAt               repair timestamp recorded in the sstable metadata
 * @param sstableMetadataCollector accumulates statistics while rows are appended
 */
public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner<?> partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename),
          components(metadata),
          metadata,
          partitioner);
    this.repairedAt = repairedAt;
    this.sstableMetadataCollector = sstableMetadataCollector;
    iwriter = new IndexWriter(keyCount);
    if (!compression)
    {
        // Uncompressed path: plain sequential writer plus a CRC component.
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    else
    {
        // Compressed path: the writer emits the COMPRESSION_INFO component and the
        // segment builder must be compression-aware.
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
}
示例12: closeAndOpenReader
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Seals this writer's on-disk components and opens them as a live SSTableReader.
 *
 * @param maxDataAge upper bound on the age of the data, forwarded to the reader
 * @param repairedAt repair timestamp recorded while closing the sstable
 * @return the newly opened reader over the finalized components
 */
public SSTableReader closeAndOpenReader(long maxDataAge, long repairedAt)
{
    // Flush and seal all components, obtaining the final (non-tmp) descriptor
    // and the collected statistics.
    Pair<Descriptor, StatsMetadata> sealed = close(repairedAt);
    Descriptor finalDesc = sealed.left;
    StatsMetadata stats = sealed.right;

    // finalize in-memory state for the reader
    SegmentedFile indexSegments = iwriter.builder.complete(finalDesc.filenameFor(Component.PRIMARY_INDEX));
    SegmentedFile dataSegments = dbuilder.complete(finalDesc.filenameFor(Component.DATA));
    SSTableReader reader = SSTableReader.internalOpen(finalDesc,
                                                      components,
                                                      metadata,
                                                      partitioner,
                                                      indexSegments,
                                                      dataSegments,
                                                      iwriter.summary.build(partitioner),
                                                      iwriter.bf,
                                                      maxDataAge,
                                                      stats,
                                                      false);
    reader.first = getMinimalKey(first);
    reader.last = getMinimalKey(last);

    // try to save the summaries to disk so future opens can skip rebuilding them
    reader.saveSummary(iwriter.builder, dbuilder);

    // Null out so this writer cannot be reused; the reader now owns these resources.
    iwriter = null;
    dbuilder = null;
    return reader;
}
示例13: IndexWriter
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Prepares the index-side artifacts for a new sstable: the bloom filter and index
 * summary are sized up front from the expected key count, and the primary-index
 * file is opened for sequential writing.
 *
 * @param keyCount expected number of keys, used to size the filter and summary
 */
IndexWriter(long keyCount)
{
    bf = FilterFactory.getFilter(keyCount, metadata.getBloomFilterFpChance(), true);
    summary = new IndexSummaryBuilder(keyCount, metadata.getMinIndexInterval(), Downsampling.BASE_SAMPLING_LEVEL);
    builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode());
    indexFile = SequentialWriter.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
}
示例14: loadSummary
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Load index summary from Summary.db file if it exists.
 *
 * if loaded index summary has different index interval from current value stored in schema,
 * then Summary.db file will be deleted and this returns false to rebuild summary.
 *
 * @param ibuilder index-file segment builder whose bounds are restored from the summary file
 * @param dbuilder data-file segment builder whose bounds are restored from the summary file
 * @return true if index summary is loaded successfully from Summary.db file.
 */
public boolean loadSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder)
{
File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
if (!summariesFile.exists())
return false;
DataInputStream iStream = null;
try
{
iStream = new DataInputStream(new FileInputStream(summariesFile));
indexSummary = IndexSummary.serializer.deserialize(iStream, partitioner, descriptor.version.hasSamplingLevel, metadata.getMinIndexInterval(), metadata.getMaxIndexInterval());
first = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
last = partitioner.decorateKey(ByteBufferUtil.readWithLength(iStream));
ibuilder.deserializeBounds(iStream);
dbuilder.deserializeBounds(iStream);
}
catch (IOException e)
{
// Release the partially-deserialized summary (it was previously leaked here)
// and clear the field so no later code observes a closed summary.
if (indexSummary != null)
{
indexSummary.close();
indexSummary = null;
}
logger.debug("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
FileUtils.closeQuietly(iStream);
// corrupted; delete it and fall back to creating a new summary
FileUtils.deleteWithConfirm(summariesFile);
return false;
}
finally
{
FileUtils.closeQuietly(iStream);
}
return true;
}
示例15: SSTableWriter
import org.apache.cassandra.io.util.SegmentedFile; //导入依赖的package包/类
/**
 * Constructs a writer for a brand-new sstable with the default component set
 * (DATA, FILTER, PRIMARY_INDEX, STATS) and default row/column histograms.
 *
 * @param filename       path the sstable components are derived from
 * @param keyCount       expected number of keys, used to size the index writer
 * @param replayPosition commit-log position recorded with the sstable
 * @throws IOException if the data or index files cannot be opened
 */
public SSTableWriter(String filename, long keyCount, CFMetaData metadata, IPartitioner partitioner, ReplayPosition replayPosition) throws IOException
{
    super(Descriptor.fromFilename(filename),
          new HashSet<Component>(Arrays.asList(Component.DATA, Component.FILTER, Component.PRIMARY_INDEX, Component.STATS)),
          metadata,
          replayPosition,
          partitioner,
          SSTable.defaultRowHistogram(),
          SSTable.defaultColumnHistogram());
    dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    dataFile = new BufferedRandomAccessFile(new File(getFilename()), "rw", BufferedRandomAccessFile.DEFAULT_BUFFER_SIZE, true);
    iwriter = new IndexWriter(descriptor, partitioner, keyCount);
}