This article compiles typical usage examples of the Java class org.apache.cassandra.io.util.DataIntegrityMetadata. If you are unsure what DataIntegrityMetadata does or how to use it, the curated code examples below may help.
DataIntegrityMetadata belongs to the org.apache.cassandra.io.util package. Seven code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
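Before the examples, here is a minimal sketch of the two sides of the class as they appear below: ChecksumWriter wraps a data output on the write path, and checksumValidator builds a per-chunk validator from an SSTable descriptor on the read path. The method shells and the descriptor/out parameters are hypothetical scaffolding, and the Descriptor/Component import paths are assumptions based on the Cassandra source tree layout; only the DataIntegrityMetadata calls themselves are taken from the examples.

import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;

import org.apache.cassandra.io.sstable.Component;   // assumed import path
import org.apache.cassandra.io.sstable.Descriptor;  // assumed import path
import org.apache.cassandra.io.util.DataIntegrityMetadata;
import org.apache.cassandra.io.util.DataIntegrityMetadata.ChecksumValidator;

public class DataIntegrityMetadataSketch
{
    // Write path: record per-chunk checksums for everything written to `out`.
    static DataIntegrityMetadata.ChecksumWriter newCrcWriter(DataOutputStream out)
    {
        return new DataIntegrityMetadata.ChecksumWriter(out);
    }

    // Read path: build a validator only when the -CRC component exists,
    // mirroring the null-check used in Examples 4-7 below.
    static ChecksumValidator maybeValidator(Descriptor descriptor) throws IOException
    {
        return new File(descriptor.filenameFor(Component.CRC)).exists()
             ? DataIntegrityMetadata.checksumValidator(descriptor)
             : null;
    }
}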
Example 1: CompressedSequentialWriter
import org.apache.cassandra.io.util.DataIntegrityMetadata; // import the required package/class
public CompressedSequentialWriter(File file,
String offsetsPath,
CompressionParameters parameters,
MetadataCollector sstableMetadataCollector)
{
super(file, parameters.chunkLength());
this.compressor = parameters.sstableCompressor;
// size the compression buffer for the worst-case compressed length of a full chunk
compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);
/* index file (-CompressionInfo.db component) and its header */
metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);
this.sstableMetadataCollector = sstableMetadataCollector;
crcMetadata = new DataIntegrityMetadata.ChecksumWriter(out);
}
Example 2: CompressedSequentialWriter
import org.apache.cassandra.io.util.DataIntegrityMetadata; // import the required package/class
public CompressedSequentialWriter(File file,
String offsetsPath,
CompressionParams parameters,
MetadataCollector sstableMetadataCollector)
{
super(file, parameters.chunkLength(), parameters.getSstableCompressor().preferredBufferType());
this.compressor = parameters.getSstableCompressor();
// size the compression buffer for the worst-case compressed length of a full chunk
compressed = compressor.preferredBufferType().allocate(compressor.initialCompressedBufferLength(buffer.capacity()));
/* index file (-CompressionInfo.db component) and its header */
metadataWriter = CompressionMetadata.Writer.open(parameters, offsetsPath);
this.sstableMetadataCollector = sstableMetadataCollector;
crcMetadata = new DataIntegrityMetadata.ChecksumWriter(new DataOutputStream(Channels.newOutputStream(channel)));
}
Example 3: CompressedSequentialWriter
import org.apache.cassandra.io.util.DataIntegrityMetadata; // import the required package/class
public CompressedSequentialWriter(File file,
String offsetsPath,
boolean skipIOCache,
CompressionParameters parameters,
MetadataCollector sstableMetadataCollector)
{
super(file, parameters.chunkLength(), skipIOCache);
this.compressor = parameters.sstableCompressor;
// size the compression buffer for the worst-case compressed length of a full chunk
compressed = new ICompressor.WrappedArray(new byte[compressor.initialCompressedBufferLength(buffer.length)]);
/* index file (-CompressionInfo.db component) and its header */
metadataWriter = CompressionMetadata.Writer.open(offsetsPath);
metadataWriter.writeHeader(parameters);
this.sstableMetadataCollector = sstableMetadataCollector;
crcMetadata = new DataIntegrityMetadata.ChecksumWriter(out);
}
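Examples 1-3 share one pattern: allocate a compression buffer via compressor.initialCompressedBufferLength(...), open a CompressionMetadata.Writer for the -CompressionInfo.db component, and attach a DataIntegrityMetadata.ChecksumWriter to the data output. For illustration only, here is a stripped-down, self-contained checksum writer in the spirit of ChecksumWriter; the class name and the idea of appending each chunk's CRC32 to a caller-supplied stream are hypothetical, not Cassandra's actual implementation.

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;

// Toy per-chunk checksum writer (illustrative; not the Cassandra class).
public class ToyChecksumWriter
{
    private final CRC32 checksum = new CRC32();
    private final DataOutputStream crcOut;

    public ToyChecksumWriter(DataOutputStream crcOut)
    {
        this.crcOut = crcOut;
    }

    // Checksum one chunk of data and record its CRC32 value.
    public void append(byte[] buffer, int offset, int length) throws IOException
    {
        checksum.reset();
        checksum.update(buffer, offset, length);
        crcOut.writeInt((int) checksum.getValue());
    }
}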
Example 4: write
import org.apache.cassandra.io.util.DataIntegrityMetadata; // import the required package/class
/**
* Stream the specified sections of the file to the given channel.
*
* StreamWriter compresses the data with LZF on the wire to reduce the transfer size.
*
* @param channel where this writes data to
* @throws IOException on any I/O error
*/
public void write(WritableByteChannel channel) throws IOException
{
long totalSize = totalSize();
RandomAccessReader file = sstable.openDataReader();
ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
: null;
transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];
// setting up data compression stream
compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
long progress = 0L;
try
{
// stream each of the required sections of the file
for (Pair<Long, Long> section : sections)
{
long start = validator == null ? section.left : validator.chunkStart(section.left);
int readOffset = (int) (section.left - start);
// seek to the beginning of the section
file.seek(start);
if (validator != null)
validator.seek(start);
// length of the section to read
long length = section.right - start;
// tracks write progress
long bytesRead = 0;
while (bytesRead < length)
{
long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
bytesRead += lastBytesRead;
progress += (lastBytesRead - readOffset);
session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
readOffset = 0;
}
// make sure the current section is sent
compressedOutput.flush();
}
}
finally
{
// close the file and validator no matter what happens
FileUtils.closeQuietly(file);
FileUtils.closeQuietly(validator);
}
}
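The loop in Example 4 realigns each requested section to a checksum-chunk boundary so the validator always sees whole chunks; the first readOffset bytes of the realigned read are checksummed but excluded from progress. Below is a standalone sketch of that arithmetic with worked numbers; the floor-to-chunk-boundary behaviour is an assumption about ChecksumValidator.chunkStart, whose implementation is not shown here.

// Hypothetical standalone illustration of the section realignment in write().
// With chunkSize = 65536 and a section starting at byte 70000:
//   chunkStart(70000) -> 65536, readOffset -> 4464,
// so streaming begins at the chunk boundary and the first readOffset bytes
// are checksummed but not counted towards transfer progress.
public class ChunkAlignment
{
    static long chunkStart(long position, int chunkSize)
    {
        return (position / chunkSize) * (long) chunkSize; // floor to chunk boundary (assumption)
    }

    public static void main(String[] args)
    {
        int chunkSize = 65536;
        long sectionLeft = 70000, sectionRight = 200000;

        long start = chunkStart(sectionLeft, chunkSize);
        int readOffset = (int) (sectionLeft - start);
        long length = sectionRight - start;

        System.out.printf("start=%d readOffset=%d length=%d%n", start, readOffset, length);
        // prints: start=65536 readOffset=4464 length=134464
    }
}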
Example 5: write
import org.apache.cassandra.io.util.DataIntegrityMetadata; // import the required package/class
/**
* Stream the specified sections of the file to the given channel.
*
* StreamWriter compresses the data with LZF on the wire to reduce the transfer size.
*
* @param channel where this writes data to
* @throws IOException on any I/O error
*/
public void write(WritableByteChannel channel) throws IOException
{
long totalSize = totalSize();
RandomAccessReader file = sstable.openDataReader();
ChecksumValidator validator = null;
if (new File(sstable.descriptor.filenameFor(Component.CRC)).exists())
validator = DataIntegrityMetadata.checksumValidator(sstable.descriptor);
transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];
// setting up data compression stream
compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
long progress = 0L;
try
{
// stream each of the required sections of the file
for (Pair<Long, Long> section : sections)
{
long start = validator == null ? section.left : validator.chunkStart(section.left);
int skipBytes = (int) (section.left - start);
// seek to the beginning of the section
file.seek(start);
if (validator != null)
validator.seek(start);
// length of the section to read
long length = section.right - start;
// tracks write progress
long bytesTransferred = 0;
while (bytesTransferred < length)
{
long lastWrite = write(file, validator, skipBytes, length, bytesTransferred);
bytesTransferred += lastWrite;
progress += lastWrite;
session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
skipBytes = 0;
}
// make sure the current section is sent
compressedOutput.flush();
}
}
finally
{
// close the file and validator no matter what happens
FileUtils.closeQuietly(file);
FileUtils.closeQuietly(validator);
}
// release reference only when completed successfully
sstable.releaseReference();
}
Example 6: write
import org.apache.cassandra.io.util.DataIntegrityMetadata; // import the required package/class
/**
* Stream the specified sections of the file to the given output.
*
* StreamWriter compresses the data with LZF on the wire to reduce the transfer size.
*
* @param output where this writes data to
* @throws IOException on any I/O error
*/
public void write(DataOutputStreamPlus output) throws IOException
{
long totalSize = totalSize();
logger.debug("[Stream #{}] Start streaming file {} to {}, repairedAt = {}, totalSize = {}", session.planId(),
sstable.getFilename(), session.peer, sstable.getSSTableMetadata().repairedAt, totalSize);
try(RandomAccessReader file = sstable.openDataReader();
ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
: null;)
{
transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];
// setting up data compression stream
compressedOutput = new LZFOutputStream(output);
long progress = 0L;
// stream each of the required sections of the file
for (Pair<Long, Long> section : sections)
{
long start = validator == null ? section.left : validator.chunkStart(section.left);
int readOffset = (int) (section.left - start);
// seek to the beginning of the section
file.seek(start);
if (validator != null)
validator.seek(start);
// length of the section to read
long length = section.right - start;
// tracks write progress
long bytesRead = 0;
while (bytesRead < length)
{
long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
bytesRead += lastBytesRead;
progress += (lastBytesRead - readOffset);
session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
readOffset = 0;
}
// make sure the current section is sent
compressedOutput.flush();
}
logger.debug("[Stream #{}] Finished streaming file {} to {}, bytesTransferred = {}, totalSize = {}",
session.planId(), sstable.getFilename(), session.peer, progress, totalSize);
}
}
Example 7: write
import org.apache.cassandra.io.util.DataIntegrityMetadata; // import the required package/class
/**
* Stream the specified sections of the file to the given channel.
*
* StreamWriter compresses the data with LZF on the wire to reduce the transfer size.
*
* @param channel where this writes data to
* @throws IOException on any I/O error
*/
public void write(WritableByteChannel channel) throws IOException
{
long totalSize = totalSize();
RandomAccessReader file = sstable.openDataReader();
ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
: null;
transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];
// setting up data compression stream
compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
long progress = 0L;
try
{
// stream each of the required sections of the file
for (Pair<Long, Long> section : sections)
{
long start = validator == null ? section.left : validator.chunkStart(section.left);
int skipBytes = (int) (section.left - start);
// seek to the beginning of the section
file.seek(start);
if (validator != null)
validator.seek(start);
// length of the section to read
long length = section.right - start;
// tracks write progress
long bytesTransferred = 0;
while (bytesTransferred < length)
{
long lastWrite = write(file, validator, skipBytes, length, bytesTransferred);
bytesTransferred += lastWrite;
progress += lastWrite;
session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
skipBytes = 0;
}
// make sure the current section is sent
compressedOutput.flush();
}
}
finally
{
// close the file and validator no matter what happens
FileUtils.closeQuietly(file);
FileUtils.closeQuietly(validator);
}
}