

Java CompressionMetadata.Chunk Code Examples

This article collects typical usage examples of the Java nested class org.apache.cassandra.io.compress.CompressionMetadata.Chunk. If you are wondering what CompressionMetadata.Chunk does, how to use it, or want to see it in working code, the curated examples below should help. You can also explore other usage examples of its enclosing class, org.apache.cassandra.io.compress.CompressionMetadata.


The following presents 14 code examples of CompressionMetadata.Chunk, drawn from open-source projects and ordered by popularity.
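
For orientation before the examples: as used below, a CompressionMetadata.Chunk describes one compressed block in an SSTable data file by its offset and length, and every chunk is followed on disk by a 4-byte checksum, which is why the expression chunk.length + 4 recurs throughout. Here is a minimal sketch of that accounting, using a simplified stand-in class rather than Cassandra's real Chunk (the field names are taken from the examples; everything else is illustrative):

// Simplified stand-in for CompressionMetadata.Chunk, modelled on the two
// fields the examples below rely on; not the real Cassandra class.
final class ChunkSketch
{
    final long offset; // position of the compressed block in the data file
    final int length;  // compressed length, excluding the 4-byte checksum trailer

    ChunkSketch(long offset, int length)
    {
        this.offset = offset;
        this.length = length;
    }

    // On-disk footprint of a chunk list: each chunk carries a 4-byte checksum.
    static long totalOnDiskSize(ChunkSketch[] chunks)
    {
        long size = 0;
        for (ChunkSketch chunk : chunks)
            size += chunk.length + 4; // 4 bytes for CRC, as in Examples 3, 5 and 11
        return size;
    }
}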

Example 1: runMayThrow

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
protected void runMayThrow() throws Exception
{
    byte[] compressedWithCRC;
    while (chunks.hasNext())
    {
        CompressionMetadata.Chunk chunk = chunks.next();

        int readLength = chunk.length + 4; // read with CRC
        compressedWithCRC = new byte[readLength];

        int bufferRead = 0;
        while (bufferRead < readLength)
        {
            int r = source.read(compressedWithCRC, bufferRead, readLength - bufferRead);
            if (r < 0)
            {
                dataBuffer.put(POISON_PILL);
                return; // throw exception where we consume dataBuffer
            }
            bufferRead += r;
        }
        dataBuffer.put(compressedWithCRC);
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 25, Source: CompressedInputStream.java
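
The reader thread above only frames each chunk's bytes; the checksum is verified wherever dataBuffer is consumed. A hedged sketch of what that consumer-side check could look like, assuming the trailing 4 bytes are a big-endian CRC32 of the compressed block (Example 13 points at CRC32, but both the checksum type and the byte order are assumptions here, not confirmed Cassandra behaviour):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.zip.CRC32;

final class ChunkChecksum
{
    // Hypothetical consumer-side check: compressedWithCRC holds the compressed
    // bytes followed by a 4-byte checksum, matching the framing in runMayThrow.
    static void verify(byte[] compressedWithCRC) throws IOException
    {
        int dataLength = compressedWithCRC.length - 4;

        CRC32 crc = new CRC32();
        crc.update(compressedWithCRC, 0, dataLength);

        int stored = ByteBuffer.wrap(compressedWithCRC, dataLength, 4).getInt();
        if ((int) crc.getValue() != stored)
            throw new IOException("chunk checksum mismatch");
    }
}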

Example 2: runMayThrow

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
protected void runMayThrow() throws Exception
{
    byte[] compressedWithCRC;
    while (chunks.hasNext())
    {
        CompressionMetadata.Chunk chunk = chunks.next();

        int readLength = chunk.length + 4; // read with CRC
        compressedWithCRC = new byte[readLength];

        int bufferRead = 0;
        while (bufferRead < readLength)
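            // note: a negative return from read() (end of stream) is not handled here,
            // unlike the guarded variants in Examples 1 and 8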
            bufferRead += source.read(compressedWithCRC, bufferRead, readLength - bufferRead);
        dataBuffer.put(compressedWithCRC);
    }
}
 
Developer: pgaref, Project: ACaZoo, Lines: 17, Source: CompressedInputStream.java

Example 3: calculateSize

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
private long calculateSize()
{
    long transferSize = 0;
    if (compressionInfo != null)
    {
        // calculate total length of transferring chunks
        for (CompressionMetadata.Chunk chunk : compressionInfo.chunks)
            transferSize += chunk.length + 4; // 4 bytes for CRC
    }
    else if (compressionMetadata != null)
    {
        transferSize = compressionMetadata.getTotalSizeForSections(sections);
    }
    else
    {
        for (Pair<Long, Long> section : sections)
            transferSize += section.right - section.left;
    }
    return transferSize;
}
 
Developer: scylladb, Project: scylla-tools-java, Lines: 21, Source: FileMessageHeader.java

Example 4: deserialize

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
public CompressionInfo deserialize(DataInput in, int version) throws IOException
{
    // chunks
    int chunkCount = in.readInt();
    if (chunkCount < 0)
        return null;

    CompressionMetadata.Chunk[] chunks = new CompressionMetadata.Chunk[chunkCount];
    for (int i = 0; i < chunkCount; i++)
        chunks[i] = CompressionMetadata.Chunk.serializer.deserialize(in, version);

    // compression params
    CompressionParameters parameters = CompressionParameters.serializer.deserialize(in, version);
    return new CompressionInfo(chunks, parameters);
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 16, Source: CompressionInfo.java
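
The wire format implied by this deserializer is: an int chunk count, where a negative value means "no compression info", followed by that many serialized chunks, then the compression parameters. For contrast, here is a mirror-image serializer sketch inferred from the read order above; the serialize signatures are assumptions modelled on the deserialize calls, not verified Cassandra API (see Examples 12 and 14 for the CompressionInfo constructor):

// Hypothetical counterpart to the deserialize() above, writing fields in the
// same order they are read; signatures are assumed, not confirmed.
public void serialize(CompressionInfo info, DataOutput out, int version) throws IOException
{
    if (info == null)
    {
        out.writeInt(-1); // negative count signals "no compression info"
        return;
    }

    out.writeInt(info.chunks.length);
    for (CompressionMetadata.Chunk chunk : info.chunks)
        CompressionMetadata.Chunk.serializer.serialize(chunk, out, version);

    CompressionParameters.serializer.serialize(info.parameters, out, version);
}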

Example 5: totalSize

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
@Override
protected long totalSize()
{
    long size = 0;
    // calculate total length of transferring chunks
    for (CompressionMetadata.Chunk chunk : compressionInfo.chunks)
        size += chunk.length + 4; // 4 bytes for CRC
    return size;
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 10, Source: CompressedStreamWriter.java

Example 6: getTransferSections

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
private List<Pair<Long, Long>> getTransferSections(CompressionMetadata.Chunk[] chunks)
{
    List<Pair<Long, Long>> transferSections = new ArrayList<>();
    Pair<Long, Long> lastSection = null;
    for (CompressionMetadata.Chunk chunk : chunks)
    {
        if (lastSection != null)
        {
            if (chunk.offset == lastSection.right)
            {
                // extend previous section to end of this chunk
                lastSection = Pair.create(lastSection.left, chunk.offset + chunk.length + 4); // 4 bytes for CRC
            }
            else
            {
                transferSections.add(lastSection);
                lastSection = Pair.create(chunk.offset, chunk.offset + chunk.length + 4);
            }
        }
        else
        {
            lastSection = Pair.create(chunk.offset, chunk.offset + chunk.length + 4);
        }
    }
    if (lastSection != null)
        transferSections.add(lastSection);
    return transferSections;
}
 
Developer: pgaref, Project: ACaZoo, Lines: 29, Source: CompressedStreamWriter.java
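
To see what getTransferSections produces, here is the same merge rule restated over plain (offset, length) pairs so it runs standalone, with hypothetical chunk geometry: chunk B starts exactly at A's on-disk end (offset + length + 4), so it extends A's section, while chunk C leaves a gap and opens a new one.

import java.util.ArrayList;
import java.util.List;

final class TransferSectionsDemo
{
    // Same merge rule as Example 6: a chunk whose offset equals the previous
    // section's end extends that section; otherwise a new section starts.
    static List<long[]> merge(long[][] chunks) // each entry: { offset, length }
    {
        List<long[]> sections = new ArrayList<>();
        long[] last = null;
        for (long[] c : chunks)
        {
            long end = c[0] + c[1] + 4; // 4 bytes for CRC
            if (last != null && c[0] == last[1])
                last[1] = end;          // contiguous: extend the previous section
            else
                sections.add(last = new long[]{ c[0], end });
        }
        return sections;
    }

    public static void main(String[] args)
    {
        // Hypothetical geometry: A=(0,60), B=(64,60) contiguous with A, C=(200,60) not.
        for (long[] s : merge(new long[][]{ { 0, 60 }, { 64, 60 }, { 200, 60 } }))
            System.out.println("(" + s[0] + ", " + s[1] + ")"); // (0, 128) then (200, 264)
    }
}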

Example 7: deserialize

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
public CompressionInfo deserialize(DataInputPlus in, int version) throws IOException
{
    // chunks
    int chunkCount = in.readInt();
    if (chunkCount < 0)
        return null;

    CompressionMetadata.Chunk[] chunks = new CompressionMetadata.Chunk[chunkCount];
    for (int i = 0; i < chunkCount; i++)
        chunks[i] = CompressionMetadata.Chunk.serializer.deserialize(in, version);

    // compression params
    CompressionParams parameters = CompressionParams.serializer.deserialize(in, version);
    return new CompressionInfo(chunks, parameters);
}
 
Developer: scylladb, Project: scylla-tools-java, Lines: 16, Source: CompressionInfo.java

Example 8: runMayThrow

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
protected void runMayThrow() throws Exception
{
    byte[] compressedWithCRC;
    while (chunks.hasNext())
    {
        CompressionMetadata.Chunk chunk = chunks.next();

        int readLength = chunk.length + 4; // read with CRC
        compressedWithCRC = new byte[readLength];

        int bufferRead = 0;
        while (bufferRead < readLength)
        {
            try
            {
                int r = source.read(compressedWithCRC, bufferRead, readLength - bufferRead);
                if (r < 0)
                {
                    readException = new EOFException("No chunk available");
                    dataBuffer.put(POISON_PILL);
                    return; // throw exception where we consume dataBuffer
                }
                bufferRead += r;
            }
            catch (IOException e)
            {
                logger.warn("Error while reading compressed input stream.", e);
                readException = e;
                dataBuffer.put(POISON_PILL);
                return; // throw exception where we consume dataBuffer
            }
        }
        dataBuffer.put(compressedWithCRC);
    }
}
 
Developer: scylladb, Project: scylla-tools-java, Lines: 36, Source: CompressedInputStream.java

Example 9: overrideWithGarbage

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
private void overrideWithGarbage(SSTableReader sstable, ByteBuffer key1, ByteBuffer key2) throws IOException
{
    boolean compression = Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false"));
    long startPosition, endPosition;

    if (compression)
    { // overwrite with garbage the compression chunks from key1 to key2
        CompressionMetadata compData = CompressionMetadata.create(sstable.getFilename());

        CompressionMetadata.Chunk chunk1 = compData.chunkFor(
                sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.getPartitioner()), SSTableReader.Operator.EQ).position);
        CompressionMetadata.Chunk chunk2 = compData.chunkFor(
                sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.getPartitioner()), SSTableReader.Operator.EQ).position);

        startPosition = Math.min(chunk1.offset, chunk2.offset);
        endPosition = Math.max(chunk1.offset + chunk1.length, chunk2.offset + chunk2.length);

        compData.close();
    }
    else
    { // overwrite with garbage from key1 to key2
        long row0Start = sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.getPartitioner()), SSTableReader.Operator.EQ).position;
        long row1Start = sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.getPartitioner()), SSTableReader.Operator.EQ).position;
        startPosition = Math.min(row0Start, row1Start);
        endPosition = Math.max(row0Start, row1Start);
    }

    overrideWithGarbage(sstable, startPosition, endPosition);
}
 
Developer: scylladb, Project: scylla-tools-java, Lines: 30, Source: ScrubTest.java

Example 10: getTransferSections

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
private List<Pair<Long, Long>> getTransferSections(CompressionMetadata.Chunk[] chunks)
{
    List<Pair<Long, Long>> transferSections = new ArrayList<Pair<Long, Long>>();
    Pair<Long, Long> lastSection = null;
    for (CompressionMetadata.Chunk chunk : chunks)
    {
        if (lastSection != null)
        {
            if (chunk.offset == lastSection.right)
            {
                // extend previous section to end of this chunk
                lastSection = Pair.create(lastSection.left, chunk.offset + chunk.length + 4); // 4 bytes for CRC
            }
            else
            {
                transferSections.add(lastSection);
                lastSection = Pair.create(chunk.offset, chunk.offset + chunk.length + 4);
            }
        }
        else
        {
            lastSection = Pair.create(chunk.offset, chunk.offset + chunk.length + 4);
        }
    }
    if (lastSection != null)
        transferSections.add(lastSection);
    return transferSections;
}
 
Developer: wso2, Project: wso2-cassandra, Lines: 29, Source: CompressedFileStreamTask.java

Example 11: PendingFile

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
public PendingFile(SSTableReader sstable,
                   Descriptor desc,
                   String component,
                   List<Pair<Long,Long>> sections,
                   OperationType type,
                   long estimatedKeys,
                   CompressionInfo compressionInfo)
{
    this.sstable = sstable;
    this.desc = desc;
    this.component = component;
    this.sections = sections;
    this.type = type;

    long tempSize = 0;
    if (compressionInfo == null)
    {
        for (Pair<Long, Long> section : sections)
            tempSize += section.right - section.left;
    }
    else
    {
        // calculate total length of transferring chunks
        for (CompressionMetadata.Chunk chunk : compressionInfo.chunks)
            tempSize += chunk.length + 4; // 4 bytes for CRC
    }
    size = tempSize;

    this.estimatedKeys = estimatedKeys;
    this.compressionInfo = compressionInfo;
}
 
Developer: dprguiuc, Project: Cassandra-Wasef, Lines: 32, Source: PendingFile.java

Example 12: CompressionInfo

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
public CompressionInfo(CompressionMetadata.Chunk[] chunks, CompressionParameters parameters)
{
    assert chunks != null && parameters != null;
    this.chunks = chunks;
    this.parameters = parameters;
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 7, Source: CompressionInfo.java

Example 13: testMapForCompressionMetadata

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
@Test
public void testMapForCompressionMetadata() throws Exception
{
    int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
    MmappedRegions.MAX_SEGMENT_SIZE = 1024;

    ByteBuffer buffer = allocateBuffer(128 * 1024);
    File f = File.createTempFile("testMapForCompressionMetadata", "1");
    f.deleteOnExit();

    File cf = File.createTempFile(f.getName() + ".metadata", "1");
    cf.deleteOnExit();

    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try(SequentialWriter writer = new CompressedSequentialWriter(f,
                                                                 cf.getAbsolutePath(),
                                                                 CompressionParams.snappy(),
                                                                 sstableMetadataCollector))
    {
        writer.write(buffer);
        writer.finish();
    }

    CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), ChecksumType.CRC32);
    try(ChannelProxy channel = new ChannelProxy(f);
        MmappedRegions regions = MmappedRegions.map(channel, metadata))
    {

        assertFalse(regions.isEmpty());
        int i = 0;
        while(i < buffer.capacity())
        {
            CompressionMetadata.Chunk chunk = metadata.chunkFor(i);

            MmappedRegions.Region region = regions.floor(chunk.offset);
            assertNotNull(region);

            ByteBuffer compressedChunk = region.buffer.duplicate();
            assertNotNull(compressedChunk);
            assertEquals(chunk.length + 4, compressedChunk.capacity());

            assertEquals(chunk.offset, region.bottom());
            assertEquals(chunk.offset + chunk.length + 4, region.top());

            i += metadata.chunkLength();
        }
    }
    finally
    {
        MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
        metadata.close();
    }
}
 
Developer: scylladb, Project: scylla-tools-java, Lines: 54, Source: MmappedRegionsTest.java

Example 14: CompressionInfo

import org.apache.cassandra.io.compress.CompressionMetadata; // import the package/class this example depends on
public CompressionInfo(CompressionMetadata.Chunk[] chunks, CompressionParams parameters)
{
    assert chunks != null && parameters != null;
    this.chunks = chunks;
    this.parameters = parameters;
}
 
Developer: scylladb, Project: scylla-tools-java, Lines: 7, Source: CompressionInfo.java


Note: The org.apache.cassandra.io.compress.CompressionMetadata.Chunk examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from their contributors' open-source projects, and copyright remains with the original authors; consult each project's License before distributing or using the code, and do not reproduce without permission.