Java FileMark Class Code Examples

This article collects typical usage examples of the Java class org.apache.cassandra.io.util.FileMark. If you are wondering what the FileMark class is for, how to use it, or where to find usage examples, the curated snippets below may help.


The FileMark class belongs to the org.apache.cassandra.io.util package. Twelve code examples of the class, drawn from several open-source projects, are presented below, sorted by popularity by default.
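FileMark itself is just a marker interface: each reader or writer returns its own FileMark implementation from mark(), capturing whatever state it needs to restore the stream later via reset(mark) or resetAndTruncate(mark). Before diving into the examples, here is a minimal, self-contained sketch of that idiom (illustrative only; it uses java.io.RandomAccessFile and a hypothetical Mark class rather than Cassandra's own types):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class MarkResetSketch
{
    // hypothetical stand-in for FileMark: an opaque saved position
    static final class Mark
    {
        final long pointer;
        Mark(long pointer) { this.pointer = pointer; }
    }

    public static void main(String[] args) throws IOException
    {
        File f = File.createTempFile("markSketch", ".bin");
        try (RandomAccessFile raf = new RandomAccessFile(f, "rw"))
        {
            raf.write(new byte[30]);
            raf.seek(10);
            Mark mark = new Mark(raf.getFilePointer());   // like file.mark()
            raf.seek(raf.length());                       // read/seek elsewhere
            raf.seek(mark.pointer);                       // like file.reset(mark)
            System.out.println(raf.length() - raf.getFilePointer()); // prints 20
        }
        finally
        {
            f.delete();
        }
    }
}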

Example 1: test6791

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Test
public void test6791() throws IOException, ConfigurationException
{
    File f = File.createTempFile("compressed6791_", "3");
    String filename = f.getAbsolutePath();
    try
    {

        MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance));
        CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", new CompressionParameters(SnappyCompressor.instance, 32, Collections.<String, String>emptyMap()), sstableMetadataCollector);

        for (int i = 0; i < 20; i++)
            writer.write("x".getBytes());

        FileMark mark = writer.mark();
        // write enough garbage to create new chunks:
        for (int i = 0; i < 40; ++i)
            writer.write("y".getBytes());

        writer.resetAndTruncate(mark);

        for (int i = 0; i < 20; i++)
            writer.write("x".getBytes());
        writer.close();

        CompressedRandomAccessReader reader = CompressedRandomAccessReader.open(filename, new CompressionMetadata(filename + ".metadata", f.length(), true));
        String res = reader.readLine();
        assertEquals("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", res);
        assertEquals(40, res.length());
    }
    finally
    {
        // cleanup
        if (f.exists())
            f.delete();
        File metadata = new File(filename + ".metadata");
        if (metadata.exists())
            metadata.delete();
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 41, Source: CompressedRandomAccessReaderTest.java

Example 2: testMarkAndReset

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Test
public void testMarkAndReset() throws IOException {
    SequentialWriter w = createTempFile("brafTestMark");
    w.write(new byte[30]);

    w.close();

    RandomAccessReader file = RandomAccessReader.open(w, fs);

    file.seek(10);
    FileMark mark = file.mark();

    file.seek(file.length());
    assertTrue(file.isEOF());

    file.reset();
    assertEquals(file.bytesRemaining(), 20);

    file.seek(file.length());
    assertTrue(file.isEOF());

    file.reset(mark);
    assertEquals(file.bytesRemaining(), 20);

    file.seek(file.length());
    assertEquals(file.bytesPastMark(), 20);
    assertEquals(file.bytesPastMark(mark), 20);

    file.reset(mark);
    assertEquals(file.bytesPastMark(), 0);

    file.close();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 34, Source: BufferedRandomAccessFileTest.java

Example 3: mark

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public FileMark mark()
{
    return new CompressedFileWriterMark(chunkOffset, current, validBufferBytes, chunkCount + 1);
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 6, Source: CompressedSequentialWriter.java
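The CompressedFileWriterMark constructed above bundles the four pieces of writer state that resetAndTruncate needs. Its definition is not reproduced in this article, but the constructor call here and the realMark.* field reads in the resetAndTruncate examples below imply a shape like the following (a hedged sketch; the exact modifiers and types are assumptions, not the verbatim Cassandra source):

// Sketch inferred from new CompressedFileWriterMark(chunkOffset, current,
// validBufferBytes, chunkCount + 1) and the field reads in resetAndTruncate.
static class CompressedFileWriterMark implements FileMark
{
    final long chunkOffset;    // on-disk offset of the chunk containing the mark
    final long uncDataOffset;  // absolute position in the uncompressed data stream
    final int bufferOffset;    // valid bytes in the in-memory buffer at mark time
    final int nextChunkIndex;  // index of the chunk following the mark

    CompressedFileWriterMark(long chunkOffset, long uncDataOffset, int bufferOffset, int nextChunkIndex)
    {
        this.chunkOffset = chunkOffset;
        this.uncDataOffset = uncDataOffset;
        this.bufferOffset = bufferOffset;
        this.nextChunkIndex = nextChunkIndex;
    }
}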

Example 4: resetAndTruncate

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;

    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        try
        {
            // repopulate buffer
            compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        Checksum checksum = new Adler32();
        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // mark as dirty so we don't lose the bytes on subsequent reBuffer calls
    isDirty = true;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 76, Source: CompressedSequentialWriter.java
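A note on the "- 4" in the chunk-size computation above: on disk, each compressed chunk is followed by a 4-byte Adler32 checksum of its bytes, which is why resetAndTruncate re-reads the chunk, recomputes the checksum, and compares it against out.readInt(). The checksum machinery is plain java.util.zip; here is a standalone illustration of the same calls (the sample data is made up):

import java.util.zip.Adler32;
import java.util.zip.Checksum;

public class ChecksumSketch
{
    public static void main(String[] args)
    {
        byte[] chunk = "compressed-chunk-bytes".getBytes();

        Checksum checksum = new Adler32();
        checksum.update(chunk, 0, chunk.length);

        // stored on disk and compared as the low 32 bits, as readInt() does above
        int stored = (int) checksum.getValue();
        System.out.printf("adler32 = 0x%08x%n", stored);
    }
}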

Example 5: resetAndTruncate

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;

    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        int validBytes;
        try
        {
            // decompress data chunk and store its length
            validBytes = compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex);
}
 
Developer: pgaref, Project: ACaZoo, Lines: 75, Source: CompressedSequentialWriter.java

Example 6: resetAndTruncate

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;

    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        try
        {
            // repopulate buffer
            compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        Checksum checksum = new Adler32();
        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
 
Developer: daidong, Project: GraphTrek, Lines: 73, Source: CompressedSequentialWriter.java

Example 7: resetAndTruncate

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;

    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        int validBytes;
        try
        {
            // decompress data chunk and store its length
            validBytes = compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(buffer, 0, validBytes);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex);
}
 
Developer: dprguiuc, Project: Cassandra-Wasef, Lines: 75, Source: CompressedSequentialWriter.java

Example 8: resetAndTruncate

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;

    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        try
        {
            // repopulate buffer
            compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex);
}
 
Developer: mafernandez-stratio, Project: cassandra-cqlMod, Lines: 74, Source: CompressedSequentialWriter.java

Example 9: resetAndTruncate

import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;

    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        int validBytes;
        try
        {
            // decompress data chunk and store its length
            validBytes = compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(buffer, 0, validBytes);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
 
Developer: jackliu8722, Project: cassandra-1.2.16, Lines: 75, Source: CompressedSequentialWriter.java

Example 10: mark

import org.apache.cassandra.io.util.FileMark; // import the required package/class
public FileMark mark() {
    markedPointer = current;
    return new BufferedRandomAccessFileMark(markedPointer);
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 5, Source: RandomAccessReader.java

Example 11: reset

import org.apache.cassandra.io.util.FileMark; // import the required package/class
public void reset(FileMark mark) {
    assert mark instanceof BufferedRandomAccessFileMark;
    seek(((BufferedRandomAccessFileMark) mark).pointer);
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 5, Source: RandomAccessReader.java

Example 12: bytesPastMark

import org.apache.cassandra.io.util.FileMark; // import the required package/class
public long bytesPastMark(FileMark mark) {
    assert mark instanceof BufferedRandomAccessFileMark;
    long bytes = current - ((BufferedRandomAccessFileMark) mark).pointer;
    assert bytes >= 0;
    return bytes;
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 7, Source: RandomAccessReader.java
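Examples 10 through 12 all rely on BufferedRandomAccessFileMark, whose definition the article does not show. From the call sites above (constructed with the current pointer; the pointer read back in reset and bytesPastMark), a plausible minimal form is the following sketch (inferred, not the verbatim source):

public class BufferedRandomAccessFileMark implements FileMark
{
    final long pointer; // absolute file position saved by mark()

    public BufferedRandomAccessFileMark(long pointer)
    {
        this.pointer = pointer;
    }
}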


Note: The org.apache.cassandra.io.util.FileMark class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code fragments are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.