本文整理汇总了Java中org.apache.cassandra.io.sstable.metadata.MetadataCollector类的典型用法代码示例。如果您正苦于以下问题:Java MetadataCollector类的具体用法?Java MetadataCollector怎么用?Java MetadataCollector使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
MetadataCollector类属于org.apache.cassandra.io.sstable.metadata包,在下文中一共展示了MetadataCollector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: CompressedSequentialWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses chunk-sized blocks of data before
 * writing them to {@code file}, and records chunk offsets in a companion
 * {@code CompressionMetadata.Writer} (the -CompressionInfo.db component).
 *
 * @param file                     data file that compressed chunks are written to
 * @param offsetsPath              path of the -CompressionInfo.db offsets component
 * @param parameters               compressor implementation and chunk-length settings
 * @param sstableMetadataCollector collector this writer feeds per-chunk stats into
 */
public CompressedSequentialWriter(File file,
                                  String offsetsPath,
                                  CompressionParameters parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength());
    this.compressor = parameters.sstableCompressor;
    // buffer for compression should be the same size as buffer itself
    compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);
    /* Index File (-CompressionInfo.db component) and its header */
    metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);
    this.sstableMetadataCollector = sstableMetadataCollector;
    // checksum the uncompressed stream as it is written
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(out);
}
示例2: writeFile
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Test helper: writes a fresh sstable for {@code cfs} containing
 * {@code count * 5} partitions, each holding the same {@code count}-column
 * row, and returns an open reader for it.
 */
private SSTableReader writeFile(ColumnFamilyStore cfs, int count)
{
    // Build one row with `count` columns named "0".."count-1", all with value "a".
    ArrayBackedSortedColumns columns = ArrayBackedSortedColumns.factory.create(cfs.metadata);
    for (int col = 0; col < count; col++)
        columns.addColumn(Util.column(String.valueOf(col), "a", 1));

    File sstableDir = cfs.directories.getDirectoryForNewSSTables();
    SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(sstableDir),
                                             0,
                                             0,
                                             cfs.metadata,
                                             StorageService.getPartitioner(),
                                             new MetadataCollector(cfs.metadata.comparator));
    // Append the same row under count * 5 distinct decorated keys.
    for (int key = 0; key < count * 5; key++)
        writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(key)), columns);
    return writer.closeAndOpenReader();
}
示例3: writeFile
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Test helper: writes a fresh sstable for {@code cfs} containing
 * {@code count * 5} partitions, each holding the same row of
 * {@code count / 100} randomly-valued cells, and returns an open reader.
 */
private SSTableReader writeFile(ColumnFamilyStore cfs, int count)
{
    // Build one row with count / 100 cells carrying random payloads of up to 1000 bytes.
    ArrayBackedSortedColumns columns = ArrayBackedSortedColumns.factory.create(cfs.metadata);
    for (int cell = 0; cell < count / 100; cell++)
        columns.addColumn(Util.cellname(cell), random(0, 1000), 1);

    File sstableDir = cfs.directories.getDirectoryForNewSSTables();
    SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(sstableDir),
                                             0,
                                             0,
                                             cfs.metadata,
                                             StorageService.getPartitioner(),
                                             new MetadataCollector(cfs.metadata.comparator));
    // Append the same row under count * 5 distinct decorated keys.
    for (int key = 0; key < count * 5; key++)
        writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(key)), columns);
    return writer.closeAndOpenReader();
}
示例4: BigTableWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a writer for the "big" sstable format, registering the new files with
 * {@code txn} and opening the data file with or without compression according to
 * the table's compression params.
 *
 * @param descriptor        identifies the sstable files being written
 * @param keyCount          estimated number of keys, forwarded to the index writer
 * @param repairedAt        repair timestamp recorded in the sstable metadata
 * @param metadata          table schema, including compression params
 * @param metadataCollector collector accumulating stats for the sstable metadata
 * @param header            serialization header written alongside the data
 * @param txn               lifecycle transaction that tracks the new files
 */
public BigTableWriter(Descriptor descriptor,
                      Long keyCount,
                      Long repairedAt,
                      CFMetaData metadata,
                      MetadataCollector metadataCollector,
                      SerializationHeader header,
                      LifecycleTransaction txn)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    txn.trackNew(this); // must track before any files are created
    if (compression)
    {
        // Compressed path: this writer also maintains the -CompressionInfo.db
        // component and feeds chunk stats into metadataCollector.
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.params.compression,
                                         metadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        // Uncompressed path: plain data file plus a separate CRC component.
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode(), false);
    }
    // Index writer wraps the data file so index entries can reference data offsets.
    iwriter = new IndexWriter(keyCount, dataFile);
}
示例5: CompressedSequentialWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses chunk-sized blocks of data before
 * writing them to {@code file}, using the compressor's preferred buffer type
 * (on-heap or direct) and recording chunk offsets in a companion
 * {@code CompressionMetadata.Writer} (the -CompressionInfo.db component).
 *
 * @param file                     data file that compressed chunks are written to
 * @param offsetsPath              path of the -CompressionInfo.db offsets component
 * @param parameters               compressor implementation and chunk-length settings
 * @param sstableMetadataCollector collector this writer feeds per-chunk stats into
 */
public CompressedSequentialWriter(File file,
                                  String offsetsPath,
                                  CompressionParams parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength(), parameters.getSstableCompressor().preferredBufferType());
    this.compressor = parameters.getSstableCompressor();
    // buffer for compression should be the same size as buffer itself
    compressed = compressor.preferredBufferType().allocate(compressor.initialCompressedBufferLength(buffer.capacity()));
    /* Index File (-CompressionInfo.db component) and its header */
    metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);
    this.sstableMetadataCollector = sstableMetadataCollector;
    // checksum the uncompressed stream via the underlying channel
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(new DataOutputStream(Channels.newOutputStream(channel)));
}
示例6: createCompactionWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates the writer used to rewrite the sstable being upgraded, carrying over
 * its ancestry and level metadata and stamping the given repair time.
 *
 * @param repairedAt repair timestamp recorded in the new sstable's metadata
 */
private SSTableWriter createCompactionWriter(long repairedAt)
{
    MetadataCollector collector = new MetadataCollector(cfs.getComparator());
    // Record generation and surviving ancestors of the precompacted sstable,
    // plus its level -- note that toUpgrade always holds exactly one sstable here.
    for (SSTableReader sstable : toUpgrade)
    {
        collector.addAncestor(sstable.descriptor.generation);
        for (Integer ancestor : sstable.getAncestors())
        {
            File ancestorData = new File(sstable.descriptor.withGeneration(ancestor).filenameFor(Component.DATA));
            if (ancestorData.exists())
                collector.addAncestor(ancestor);
        }
        collector.sstableLevel(sstable.getSSTableLevel());
    }
    return new SSTableWriter(cfs.getTempSSTablePath(directory), estimatedRows, repairedAt, cfs.metadata, cfs.partitioner, collector);
}
示例7: createCompactionWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates the writer used to rewrite the sstables being upgraded, carrying over
 * their generation and surviving-ancestor metadata.
 */
private SSTableWriter createCompactionWriter()
{
    MetadataCollector collector = new MetadataCollector(cfs.getComparator());
    // Record each precompacted sstable's generation plus any ancestor whose
    // data component still exists on disk.
    for (SSTableReader sstable : toUpgrade)
    {
        collector.addAncestor(sstable.descriptor.generation);
        for (Integer ancestor : sstable.getAncestors())
        {
            File ancestorData = new File(sstable.descriptor.withGeneration(ancestor).filenameFor(Component.DATA));
            if (ancestorData.exists())
                collector.addAncestor(ancestor);
        }
    }
    return new SSTableWriter(cfs.getTempSSTablePath(directory), estimatedRows, cfs.metadata, cfs.partitioner, collector);
}
示例8: CompressedSequentialWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses chunk-sized blocks of data before
 * writing them to {@code file}, writing the compression header and chunk
 * offsets to a separate index file (the -CompressionInfo.db component).
 *
 * @param file                     data file that compressed chunks are written to
 * @param indexFilePath            path of the -CompressionInfo.db offsets component
 * @param skipIOCache              passed through to the underlying writer
 * @param parameters               compressor implementation and chunk-length settings
 * @param sstableMetadataCollector collector this writer feeds per-chunk stats into
 */
public CompressedSequentialWriter(File file,
                                  String indexFilePath,
                                  boolean skipIOCache,
                                  CompressionParameters parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength(), skipIOCache);
    this.compressor = parameters.sstableCompressor;
    // buffer for compression should be the same size as buffer itself
    compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);
    /* Index File (-CompressionInfo.db component) and its header */
    metadataWriter = CompressionMetadata.Writer.open(indexFilePath);
    metadataWriter.writeHeader(parameters);
    this.sstableMetadataCollector = sstableMetadataCollector;
}
示例9: CompressedSequentialWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a sequential writer that compresses chunk-sized blocks of data before
 * writing them to {@code file}, writing the compression header and chunk
 * offsets to a separate offsets file and checksumming the output stream.
 *
 * @param file                     data file that compressed chunks are written to
 * @param offsetsPath              path of the -CompressionInfo.db offsets component
 * @param skipIOCache              passed through to the underlying writer
 * @param parameters               compressor implementation and chunk-length settings
 * @param sstableMetadataCollector collector this writer feeds per-chunk stats into
 */
public CompressedSequentialWriter(File file,
                                  String offsetsPath,
                                  boolean skipIOCache,
                                  CompressionParameters parameters,
                                  MetadataCollector sstableMetadataCollector)
{
    super(file, parameters.chunkLength(), skipIOCache);
    this.compressor = parameters.sstableCompressor;
    // buffer for compression should be the same size as buffer itself
    compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);
    /* Index File (-CompressionInfo.db component) and its header */
    metadataWriter = CompressionMetadata.Writer.open(offsetsPath);
    metadataWriter.writeHeader(parameters);
    this.sstableMetadataCollector = sstableMetadataCollector;
    // checksum the uncompressed stream as it is written
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(out);
}
示例10: create
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a multi-writer that wraps a single {@code SSTableWriter} built from
 * the given descriptor and metadata.
 */
@SuppressWarnings("resource") // SimpleSSTableMultiWriter closes writer
public static SSTableMultiWriter create(Descriptor descriptor,
                                        long keyCount,
                                        long repairedAt,
                                        CFMetaData cfm,
                                        MetadataCollector metadataCollector,
                                        SerializationHeader header,
                                        LifecycleTransaction txn)
{
    // The wrapper takes ownership of the writer and closes it.
    return new SimpleSSTableMultiWriter(
            SSTableWriter.create(descriptor, keyCount, repairedAt, cfm, metadataCollector, header, txn),
            txn);
}
示例11: open
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Opens a writer for the "big" sstable format.
 *
 * NOTE(review): {@code txn} is accepted but not forwarded — this BigTableWriter
 * overload takes no transaction (its trackNew call is commented out elsewhere
 * in this codebase); confirm lifecycle tracking happens at a higher level.
 */
@Override
public SSTableWriter open(Descriptor descriptor,
                          long keyCount,
                          long repairedAt,
                          CFMetaData metadata,
                          MetadataCollector metadataCollector,
                          SerializationHeader header,
                          LifecycleTransaction txn)
{
    return new BigTableWriter(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
}
示例12: BigTableWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates a writer for the "big" sstable format, opening either a compressed or
 * a checksummed plain data file depending on the table's compression params,
 * then setting up the file-handle builder, index writer and column-index writer.
 *
 * @param descriptor        identifies the sstable files being written
 * @param keyCount          estimated number of keys, forwarded to the index writer
 * @param repairedAt        repair timestamp recorded in the sstable metadata
 * @param metadata          table schema, including compression params
 * @param metadataCollector collector accumulating stats for the sstable metadata
 * @param header            serialization header written alongside the data
 */
public BigTableWriter(Descriptor descriptor,
                      long keyCount,
                      long repairedAt,
                      CFMetaData metadata,
                      MetadataCollector metadataCollector,
                      SerializationHeader header)
{
    super(descriptor, keyCount, repairedAt, metadata, metadataCollector, header);
    //txn.trackNew(this); // must track before any files are created
    if (compression)
    {
        // Compressed data file: also writes the -CompressionInfo.db and digest components.
        dataFile = new CompressedSequentialWriter(getFilename(),
                                                  descriptor.filenameFor(Component.COMPRESSION_INFO),
                                                  descriptor.filenameFor(descriptor.digestComponent),
                                                  writerOption,
                                                  metadata.params.compression,
                                                  metadataCollector, descriptor.getConfiguration());
    }
    else
    {
        // Plain data file: checksummed separately via the CRC and digest components.
        dataFile = new ChecksummedSequentialWriter(getFilename(),
                                                   descriptor.filenameFor(Component.CRC),
                                                   descriptor.filenameFor(descriptor.digestComponent),
                                                   writerOption,
                                                   descriptor.getConfiguration());
    }
    // Builder for read handles over the data file once the write completes.
    dbuilder = new FileHandle.Builder(descriptor.filenameFor(Component.DATA))
               .withConfiguration(descriptor.getConfiguration())
               .compressed(compression);
    //chunkCache.ifPresent(dbuilder::withChunkCache);
    iwriter = new IndexWriter(keyCount);
    // Serializes per-partition column index entries as rows are appended.
    columnIndexWriter = new ColumnIndex(this.header, dataFile, descriptor.version, this.observers,
                                        getRowIndexEntrySerializer().indexInfoSerializer());
}
示例13: SSTableWriter
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Base constructor for sstable writers: records the write parameters and
 * resolves the serialization header and row-index-entry serializer.
 *
 * @param descriptor        identifies the sstable files being written
 * @param keyCount          estimated number of keys to be written
 * @param repairedAt        repair timestamp recorded in the sstable metadata
 * @param metadata          table schema
 * @param metadataCollector collector accumulating stats for the sstable metadata
 * @param header            serialization header; may be null when streaming
 *                          from a pre-3.0 sstable
 */
protected SSTableWriter(Descriptor descriptor,
                        long keyCount,
                        long repairedAt,
                        CFMetaData metadata,
                        MetadataCollector metadataCollector,
                        SerializationHeader header)
{
    super(descriptor, components(metadata), metadata, DatabaseDescriptor.getDiskOptimizationStrategy());
    this.keyCount = keyCount;
    this.repairedAt = repairedAt;
    this.metadataCollector = metadataCollector;
    this.header = header != null ? header : SerializationHeader.makeWithoutStats(metadata); //null header indicates streaming from pre-3.0 sstable
    this.rowIndexEntrySerializer = descriptor.version.getSSTableFormat().getIndexSerializer(metadata, descriptor.version, header);
    // No observers by default; subclasses may install their own.
    this.observers = Collections.emptySet();
}
示例14: create
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Creates an sstable writer by delegating to the writer factory of the format
 * named in the descriptor.
 */
public static SSTableWriter create(Descriptor descriptor,
                                   Long keyCount,
                                   Long repairedAt,
                                   CFMetaData metadata,
                                   MetadataCollector metadataCollector,
                                   SerializationHeader header,
                                   LifecycleTransaction txn)
{
    // The descriptor knows its sstable format; let that format's factory open the writer.
    return descriptor.getFormat()
                     .getWriterFactory()
                     .open(descriptor, keyCount, repairedAt, metadata, metadataCollector, header, txn);
}
示例15: open
import org.apache.cassandra.io.sstable.metadata.MetadataCollector; //导入依赖的package包/类
/**
 * Opens a writer for a new sstable in this format.
 *
 * @param descriptor        identifies the sstable files to be written
 * @param keyCount          estimated number of keys to be written
 * @param repairedAt        repair timestamp to record in the sstable metadata
 * @param metadata          table schema
 * @param metadataCollector collector accumulating stats for the sstable metadata
 * @param header            serialization header written alongside the data
 * @param txn               lifecycle transaction tracking the new files
 * @return a writer for the new sstable
 */
public abstract SSTableWriter open(Descriptor descriptor,
                                   long keyCount,
                                   long repairedAt,
                                   CFMetaData metadata,
                                   MetadataCollector metadataCollector,
                                   SerializationHeader header,
                                   LifecycleTransaction txn);