This article collects typical usage examples of the Java class org.apache.cassandra.io.compress.CompressionMetadata. If you are unsure what CompressionMetadata is for or how to use it, the selected class code examples below may help.
CompressionMetadata belongs to the org.apache.cassandra.io.compress package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: runMayThrow
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
protected void runMayThrow() throws Exception
{
    byte[] compressedWithCRC;
    while (chunks.hasNext())
    {
        CompressionMetadata.Chunk chunk = chunks.next();
        int readLength = chunk.length + 4; // read with CRC
        compressedWithCRC = new byte[readLength];
        int bufferRead = 0;
        while (bufferRead < readLength)
        {
            int r = source.read(compressedWithCRC, bufferRead, readLength - bufferRead);
            if (r < 0)
            {
                dataBuffer.put(POISON_PILL);
                return; // throw exception where we consume dataBuffer
            }
            bufferRead += r;
        }
        dataBuffer.put(compressedWithCRC);
    }
}
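The 4 bytes trailing each chunk are its checksum, and the consumer of dataBuffer is expected to validate them before decompressing the chunk. A minimal, self-contained sketch of that validation, assuming the trailing int is a CRC32 over the compressed bytes (the checksum algorithm differs across Cassandra versions, e.g. older releases use Adler32, so treat that choice as an assumption):

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class ChunkChecksumCheck
{
    /**
     * Validates a chunk read as "compressed bytes + 4-byte checksum".
     * Assumes the trailing int is a CRC32 over the compressed bytes;
     * the Cassandra version in use may employ a different checksum.
     */
    public static boolean isValid(byte[] compressedWithCRC)
    {
        int dataLength = compressedWithCRC.length - 4;
        CRC32 checksum = new CRC32();
        checksum.update(compressedWithCRC, 0, dataLength);
        int expected = ByteBuffer.wrap(compressedWithCRC, dataLength, 4).getInt();
        return (int) checksum.getValue() == expected;
    }

    public static void main(String[] args)
    {
        // build a fake "chunk + CRC" buffer and verify it round-trips
        byte[] data = "compressed-chunk-bytes".getBytes();
        CRC32 crc = new CRC32();
        crc.update(data, 0, data.length);
        byte[] withCrc = ByteBuffer.allocate(data.length + 4)
                                   .put(data)
                                   .putInt((int) crc.getValue())
                                   .array();
        System.out.println(isValid(withCrc)); // true
    }
}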
Example 2: runMayThrow
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
protected void runMayThrow() throws Exception
{
    byte[] compressedWithCRC;
    while (chunks.hasNext())
    {
        CompressionMetadata.Chunk chunk = chunks.next();
        int readLength = chunk.length + 4; // read with CRC
        compressedWithCRC = new byte[readLength];
        int bufferRead = 0;
        while (bufferRead < readLength)
            bufferRead += source.read(compressedWithCRC, bufferRead, readLength - bufferRead);
        dataBuffer.put(compressedWithCRC);
    }
}
Example 3: FileMessageHeader
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
public FileMessageHeader(UUID cfId,
                         int sequenceNumber,
                         Version version,
                         SSTableFormat.Type format,
                         long estimatedKeys,
                         List<Pair<Long, Long>> sections,
                         CompressionMetadata compressionMetadata,
                         long repairedAt,
                         int sstableLevel,
                         SerializationHeader.Component header)
{
    this.cfId = cfId;
    this.sequenceNumber = sequenceNumber;
    this.version = version;
    this.format = format;
    this.estimatedKeys = estimatedKeys;
    this.sections = sections;
    this.compressionInfo = null;
    this.compressionMetadata = compressionMetadata;
    this.repairedAt = repairedAt;
    this.sstableLevel = sstableLevel;
    this.header = header;
    this.size = calculateSize();
}
Example 4: calculateSize
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
private long calculateSize()
{
    long transferSize = 0;
    if (compressionInfo != null)
    {
        // calculate total length of transferring chunks
        for (CompressionMetadata.Chunk chunk : compressionInfo.chunks)
            transferSize += chunk.length + 4; // 4 bytes for CRC
    }
    else if (compressionMetadata != null)
    {
        transferSize = compressionMetadata.getTotalSizeForSections(sections);
    }
    else
    {
        for (Pair<Long, Long> section : sections)
            transferSize += section.right - section.left;
    }
    return transferSize;
}
Example 5: MmappedRegions
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
private MmappedRegions(State state, CompressionMetadata metadata, long length)
{
    super(new Tidier(state));
    this.state = state;
    if (metadata != null)
    {
        assert length == 0 : "expected no length with metadata";
        updateState(metadata);
    }
    else if (length > 0)
    {
        updateState(length);
    }
    this.copy = new State(state);
}
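updateState is where the regions actually get mapped into memory, either along compressed-chunk boundaries (when CompressionMetadata is supplied) or as fixed-size slices of an uncompressed file. As a rough, JDK-only illustration of what mapping one region of an on-disk file looks like (a sketch of the underlying mechanism, not Cassandra's MmappedRegions.State bookkeeping):

import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class MapRegionSketch
{
    // Maps [offset, offset + size) of the given file read-only.
    // The mapping stays valid after the channel is closed.
    public static MappedByteBuffer mapRegion(Path file, long offset, long size) throws IOException
    {
        try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ))
        {
            return channel.map(FileChannel.MapMode.READ_ONLY, offset, size);
        }
    }

    public static void main(String[] args) throws IOException
    {
        Path file = Paths.get("example-Data.db"); // hypothetical sstable data file
        long size = Math.min(4096, Files.size(file));
        MappedByteBuffer region = mapRegion(file, 0, size);
        System.out.println("mapped " + region.capacity() + " bytes");
    }
}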
Example 6: getCompressionMetadata
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
/**
 * Returns the compression metadata for this sstable.
 * @throws IllegalStateException if the sstable is not compressed
 */
public CompressionMetadata getCompressionMetadata()
{
    if (!compression)
        throw new IllegalStateException(this + " is not compressed");
    return dfile.compressionMetadata().get();
}
Example 7: FileHandle
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
private FileHandle(Cleanup cleanup,
                   ChannelProxy channel,
                   RebuffererFactory rebuffererFactory,
                   CompressionMetadata compressionMetadata,
                   long onDiskLength,
                   Configuration configuration)
{
    super(cleanup);
    this.rebuffererFactory = rebuffererFactory;
    this.channel = channel;
    this.compressionMetadata = Optional.ofNullable(compressionMetadata);
    this.onDiskLength = onDiskLength;
    this.conf = configuration;
}
Example 8: Cleanup
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
private Cleanup(ChannelProxy channel,
                RebuffererFactory rebufferer,
                CompressionMetadata compressionMetadata,
                ChunkCache chunkCache)
{
    this.channel = channel;
    this.rebufferer = rebufferer;
    this.compressionMetadata = compressionMetadata;
    this.chunkCache = Optional.ofNullable(chunkCache);
}
Example 9: withCompressionMetadata
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
/**
 * Provide {@link CompressionMetadata} to use when reading a compressed file.
 *
 * @param metadata CompressionMetadata to use
 * @return this object
 */
public Builder withCompressionMetadata(CompressionMetadata metadata)
{
    this.compressed = Objects.nonNull(metadata);
    this.compressionMetadata = metadata;
    return this;
}
Example 10: complete
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
/**
 * Complete building {@link FileHandle} with the given length, which overrides the file length.
 *
 * @param overrideLength Override file length (in bytes) so that reads cannot go further than this value.
 *                       If the value is less than or equal to 0, then the value is ignored.
 * @return Built file
 */
@SuppressWarnings("resource")
public FileHandle complete(long overrideLength)
{
    ChannelProxy channelCopy = ChannelProxy.newInstance(path, this.conf);
    try
    {
        if (compressed && compressionMetadata == null)
            compressionMetadata = CompressionMetadata.create(channelCopy.filePath(),
                                                             channelCopy.size(),
                                                             this.conf);
        long length = overrideLength > 0
                    ? overrideLength
                    : compressed ? compressionMetadata.compressedFileLength : channelCopy.size();
        RebuffererFactory rebuffererFactory;
        if (compressed)
        {
            rebuffererFactory = maybeCached(new CompressedChunkReader.Standard(channelCopy, compressionMetadata));
        }
        else
        {
            rebuffererFactory = maybeCached(new SimpleChunkReader(channelCopy, length, bufferType, bufferSize));
        }
        Cleanup cleanup = new Cleanup(channelCopy, rebuffererFactory, compressionMetadata, chunkCache);
        return new FileHandle(cleanup, channelCopy, rebuffererFactory, compressionMetadata, length, conf);
    }
    catch (Throwable t)
    {
        channelCopy.close();
        throw t;
    }
}
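Note the shape of the error handling: complete() takes ownership of a freshly opened ChannelProxy, so if anything later in the build throws, the copy is closed before the exception propagates. The same close-on-failure pattern in plain JDK terms (a sketch of the pattern only, not the Cassandra implementation):

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public final class ChannelOwner implements AutoCloseable
{
    private final FileChannel channel;
    private final long length;

    // Opens the channel, then finishes construction; if any later step throws,
    // the channel is closed before the exception propagates, mirroring the
    // try/catch (Throwable) in complete() above.
    public static ChannelOwner open(Path path, long overrideLength) throws IOException
    {
        FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
        try
        {
            long length = overrideLength > 0 ? overrideLength : channel.size();
            return new ChannelOwner(channel, length);
        }
        catch (Throwable t)
        {
            channel.close();
            throw t;
        }
    }

    private ChannelOwner(FileChannel channel, long length)
    {
        this.channel = channel;
        this.length = length;
    }

    public long length() { return length; }

    @Override
    public void close() throws IOException { channel.close(); }
}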
Example 11: serialize
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
public void serialize(CompressionInfo info, DataOutputPlus out, int version) throws IOException
{
    if (info == null)
    {
        out.writeInt(-1);
        return;
    }
    int chunkCount = info.chunks.length;
    out.writeInt(chunkCount);
    for (int i = 0; i < chunkCount; i++)
        CompressionMetadata.Chunk.serializer.serialize(info.chunks[i], out, version);
    // compression params
    CompressionParameters.serializer.serialize(info.parameters, out, version);
}
Example 12: deserialize
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
public CompressionInfo deserialize(DataInput in, int version) throws IOException
{
    // chunks
    int chunkCount = in.readInt();
    if (chunkCount < 0)
        return null;
    CompressionMetadata.Chunk[] chunks = new CompressionMetadata.Chunk[chunkCount];
    for (int i = 0; i < chunkCount; i++)
        chunks[i] = CompressionMetadata.Chunk.serializer.deserialize(in, version);
    // compression params
    CompressionParameters parameters = CompressionParameters.serializer.deserialize(in, version);
    return new CompressionInfo(chunks, parameters);
}
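Examples 11 and 12 form a symmetric wire format: a chunk count (with -1 meaning "no compression info"), then the chunks, then the compression parameters. A simplified, self-contained round trip of the same framing idea using only the JDK, where plain (offset, length) pairs stand in for CompressionMetadata.Chunk and the parameters block is omitted:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class CompressionInfoFraming
{
    // Writes: chunkCount (int, -1 for null), then offset (long) + length (int) per chunk.
    static void serialize(long[][] chunks, DataOutputStream out) throws IOException
    {
        if (chunks == null)
        {
            out.writeInt(-1);
            return;
        }
        out.writeInt(chunks.length);
        for (long[] chunk : chunks)
        {
            out.writeLong(chunk[0]);      // offset
            out.writeInt((int) chunk[1]); // length
        }
    }

    // Reads the same framing back; a negative count means "no compression info".
    static long[][] deserialize(DataInputStream in) throws IOException
    {
        int chunkCount = in.readInt();
        if (chunkCount < 0)
            return null;
        long[][] chunks = new long[chunkCount][2];
        for (int i = 0; i < chunkCount; i++)
        {
            chunks[i][0] = in.readLong();
            chunks[i][1] = in.readInt();
        }
        return chunks;
    }

    public static void main(String[] args) throws IOException
    {
        long[][] chunks = { { 0L, 65536L }, { 65540L, 65536L } };
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        serialize(chunks, new DataOutputStream(bytes));
        long[][] back = deserialize(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(back.length + " chunks, first offset " + back[0][0]);
    }
}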
Example 13: serializedSize
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
public long serializedSize(CompressionInfo info, int version)
{
    if (info == null)
        return TypeSizes.NATIVE.sizeof(-1);
    // chunks
    int chunkCount = info.chunks.length;
    long size = TypeSizes.NATIVE.sizeof(chunkCount);
    for (int i = 0; i < chunkCount; i++)
        size += CompressionMetadata.Chunk.serializer.serializedSize(info.chunks[i], version);
    // compression params
    size += CompressionParameters.serializer.serializedSize(info.parameters, version);
    return size;
}
Example 14: totalSize
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
@Override
protected long totalSize()
{
    long size = 0;
    // calculate total length of transferring chunks
    for (CompressionMetadata.Chunk chunk : compressionInfo.chunks)
        size += chunk.length + 4; // 4 bytes for CRC
    return size;
}
Example 15: getTransferSections
import org.apache.cassandra.io.compress.CompressionMetadata; // import the required package/class
private List<Pair<Long, Long>> getTransferSections(CompressionMetadata.Chunk[] chunks)
{
    List<Pair<Long, Long>> transferSections = new ArrayList<>();
    Pair<Long, Long> lastSection = null;
    for (CompressionMetadata.Chunk chunk : chunks)
    {
        if (lastSection != null)
        {
            if (chunk.offset == lastSection.right)
            {
                // extend previous section to end of this chunk
                lastSection = Pair.create(lastSection.left, chunk.offset + chunk.length + 4); // 4 bytes for CRC
            }
            else
            {
                transferSections.add(lastSection);
                lastSection = Pair.create(chunk.offset, chunk.offset + chunk.length + 4);
            }
        }
        else
        {
            lastSection = Pair.create(chunk.offset, chunk.offset + chunk.length + 4);
        }
    }
    if (lastSection != null)
        transferSections.add(lastSection);
    return transferSections;
}
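Because compressed chunks sit back to back on disk, adjacent chunks usually collapse into a single transfer section, and a new section only starts where there is a gap. A standalone sketch of the same merging over plain (offset, length) pairs, counting length + 4 bytes per chunk for the CRC, as in the example above:

import java.util.ArrayList;
import java.util.List;

public class TransferSectionMerge
{
    // Each chunk is {offset, length}; each section is {start, end}.
    // A chunk whose offset equals the previous section's end extends that section;
    // otherwise a new section is started. Every chunk contributes length + 4 bytes.
    static List<long[]> merge(long[][] chunks)
    {
        List<long[]> sections = new ArrayList<>();
        long[] last = null;
        for (long[] chunk : chunks)
        {
            long offset = chunk[0];
            long end = chunk[0] + chunk[1] + 4; // 4 bytes for CRC
            if (last != null && offset == last[1])
                last[1] = end; // contiguous: extend the previous section in place
            else
                sections.add(last = new long[]{ offset, end }); // gap: start a new section
        }
        return sections;
    }

    public static void main(String[] args)
    {
        long[][] chunks = { { 0, 100 }, { 104, 100 }, { 300, 100 } };
        for (long[] s : merge(chunks))
            System.out.println(s[0] + " -> " + s[1]); // 0 -> 208, 300 -> 404
    }
}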