

Java SequentialWriter.write Method Code Examples

This article collects typical usage examples of the Java method org.apache.cassandra.io.util.SequentialWriter.write. If you are wondering how SequentialWriter.write is used in practice, or are looking for concrete examples of calling it, the curated code examples below should help. You can also browse further usage examples of the enclosing class, org.apache.cassandra.io.util.SequentialWriter.


Ten code examples of the SequentialWriter.write method are shown below, sorted by popularity by default.
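Before the individual examples, here is a minimal, self-contained sketch of the typical write-then-sync pattern. It assumes the same Cassandra 2.x-era API that the examples below rely on (the SequentialWriter.open(File) factory, write(byte[]), sync() and close()); the class name SequentialWriterWriteSketch is made up for illustration.

import java.io.File;
import java.io.IOException;

import org.apache.cassandra.io.util.SequentialWriter;

public class SequentialWriterWriteSketch
{
    public static void main(String[] args) throws IOException
    {
        // scratch file to write into
        File tmpFile = File.createTempFile("sw-write-sketch", ".bin");
        tmpFile.deleteOnExit();

        // buffered sequential writer over the file (same factory Example 3 uses)
        SequentialWriter writer = SequentialWriter.open(tmpFile);
        try
        {
            byte[] payload = "hello sequential writer".getBytes();

            // write() appends through an in-memory buffer; the bytes may not
            // have reached disk yet, but the writer's position already moved
            writer.write(payload);
            assert writer.getFilePointer() == payload.length;

            // sync() flushes the buffer and forces the data to disk
            writer.sync();
        }
        finally
        {
            writer.close();
        }
    }
}

Example 1 and Example 3 below exercise this same pattern while checking getFilePointer() and length().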

Example 1: testGetFilePointer

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testGetFilePointer() throws IOException {
    final SequentialWriter w = createTempFile("brafGetFilePointer");

    assertEquals(w.getFilePointer(), 0); // initial position should be 0

    w.write(generateByteArray(20));
    assertEquals(w.getFilePointer(), 20); // position 20 after writing 20 bytes

    w.sync();

    RandomAccessReader r = RandomAccessReader.open(w, fs);

    // position should change after skip bytes
    r.seek(0);
    r.skipBytes(15);
    assertEquals(r.getFilePointer(), 15);

    r.read();
    assertEquals(r.getFilePointer(), 16);
    r.read(new byte[4]);
    assertEquals(r.getFilePointer(), 20);

    w.close();
    r.close();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 27, Source: BufferedRandomAccessFileTest.java

Example 2: testBytesRemaining

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testBytesRemaining() throws IOException {
    SequentialWriter w = createTempFile("brafBytesRemaining");

    int toWrite = RandomAccessReader.DEFAULT_BUFFER_SIZE + 10;

    w.write(generateByteArray(toWrite));

    w.sync();

    RandomAccessReader r = RandomAccessReader.open(w, fs);

    assertEquals(r.bytesRemaining(), toWrite);

    for (int i = 1; i <= r.length(); i++) {
        r.read();
        assertEquals(r.bytesRemaining(), r.length() - i);
    }

    r.seek(0);
    r.skipBytes(10);
    assertEquals(r.bytesRemaining(), r.length() - 10);

    w.close();
    r.close();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 27, Source: BufferedRandomAccessFileTest.java

Example 3: testLength

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testLength() throws IOException {
    File tmpFile = File.createTempFile("lengthtest", "bin");
    SequentialWriter w = SequentialWriter.open(tmpFile);
    assertEquals(0, w.length());

    // write a chunk smaller than our buffer, so it will not be flushed
    // to disk
    byte[] lessThenBuffer = generateByteArray(RandomAccessReader.DEFAULT_BUFFER_SIZE / 2);
    w.write(lessThenBuffer);
    assertEquals(lessThenBuffer.length, w.length());

    // sync the data and check length
    w.sync();
    assertEquals(lessThenBuffer.length, w.length());

    // write more than the buffer can hold and check length
    byte[] biggerThenBuffer = generateByteArray(RandomAccessReader.DEFAULT_BUFFER_SIZE * 2);
    w.write(biggerThenBuffer);
    assertEquals(biggerThenBuffer.length + lessThenBuffer.length, w.length());

    w.close();

    // will use cached length
    RandomAccessReader r = RandomAccessReader.open(new Path(tmpFile.getPath()), fs);
    assertEquals(lessThenBuffer.length + biggerThenBuffer.length, r.length());
    r.close();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 29, Source: BufferedRandomAccessFileTest.java

Example 4: testSeek

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testSeek() throws Exception {
    SequentialWriter w = createTempFile("brafSeek");
    byte[] data = generateByteArray(RandomAccessReader.DEFAULT_BUFFER_SIZE + 20);
    w.write(data);
    w.close();

    final RandomAccessReader file = RandomAccessReader.open(w, fs);

    file.seek(0);
    assertEquals(file.getFilePointer(), 0);
    assertEquals(file.bytesRemaining(), file.length());

    file.seek(20);
    assertEquals(file.getFilePointer(), 20);
    assertEquals(file.bytesRemaining(), file.length() - 20);

    // trying to seek past the end of the file should produce IllegalArgumentException
    expectException(new Callable<Object>() {
        public Object call() {
            file.seek(file.length() + 30);
            return null;
        }
    }, IllegalArgumentException.class);

    expectException(new Callable<Object>() {
        public Object call() throws IOException {
            file.seek(-1);
            return null;
        }
    }, IllegalArgumentException.class); // negative seek is also rejected

    file.close();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 35, Source: BufferedRandomAccessFileTest.java

Example 5: testClose

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testClose() throws IOException {
    final SequentialWriter w = createTempFile("brafClose");

    byte[] data = generateByteArray(RandomAccessReader.DEFAULT_BUFFER_SIZE + 20);

    w.write(data);
    w.close(); // will flush

    final RandomAccessReader r = RandomAccessReader.open(new Path(new File(w.getPath()).getPath()), fs);

    r.close(); // closing to test read after close

    expectException(new Callable<Object>() {
        public Object call() {
            return r.read();
        }
    }, AssertionError.class);

    expectException(new Callable<Object>() {
        public Object call() throws IOException {
            w.write(generateByteArray(1));
            return null;
        }
    }, ClosedChannelException.class);

    RandomAccessReader copy = RandomAccessReader.open(new Path(new File(r.getPath()).getPath()), fs);
    ByteBuffer contents = copy.readBytes((int) copy.length());

    assertEquals(contents.limit(), data.length);
    assertEquals(ByteBufferUtil.compare(contents, data), 0);
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 33, Source: BufferedRandomAccessFileTest.java

Example 6: testMarkAndReset

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testMarkAndReset() throws IOException {
    SequentialWriter w = createTempFile("brafTestMark");
    w.write(new byte[30]);

    w.close();

    RandomAccessReader file = RandomAccessReader.open(w, fs);

    file.seek(10);
    FileMark mark = file.mark();

    file.seek(file.length());
    assertTrue(file.isEOF());

    file.reset();
    assertEquals(file.bytesRemaining(), 20);

    file.seek(file.length());
    assertTrue(file.isEOF());

    file.reset(mark);
    assertEquals(file.bytesRemaining(), 20);

    file.seek(file.length());
    assertEquals(file.bytesPastMark(), 20);
    assertEquals(file.bytesPastMark(mark), 20);

    file.reset(mark);
    assertEquals(file.bytesPastMark(), 0);

    file.close();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 34, Source: BufferedRandomAccessFileTest.java

Example 7: testAssertionErrorWhenBytesPastMarkIsNegative

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test(expected = AssertionError.class)
public void testAssertionErrorWhenBytesPastMarkIsNegative() throws IOException {
    SequentialWriter w = createTempFile("brafAssertionErrorWhenBytesPastMarkIsNegative");
    w.write(new byte[30]);
    w.close();

    RandomAccessReader r = RandomAccessReader.open(w, fs);
    r.seek(10);
    r.mark();

    r.seek(0);
    r.bytesPastMark();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 14, Source: BufferedRandomAccessFileTest.java

Example 8: testDataCorruptionDetection

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testDataCorruptionDetection() throws IOException
{
    String CONTENT = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam vitae.";

    File file = new File("testDataCorruptionDetection");
    file.deleteOnExit();

    File metadata = new File(file.getPath() + ".meta");
    metadata.deleteOnExit();

    MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance)).replayPosition(null);
    SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), new CompressionParameters(SnappyCompressor.instance), sstableMetadataCollector);

    writer.write(CONTENT.getBytes());
    writer.close();

    // open compression metadata and get chunk information
    CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), true);
    CompressionMetadata.Chunk chunk = meta.chunkFor(0);

    RandomAccessReader reader = CompressedRandomAccessReader.open(file.getPath(), meta);
    // read and verify compressed data
    assertEquals(CONTENT, reader.readLine());
    // close reader
    reader.close();

    Random random = new Random();
    RandomAccessFile checksumModifier = null;

    try
    {
        checksumModifier = new RandomAccessFile(file, "rw");
        byte[] checksum = new byte[4];

        // seek to the end of the compressed chunk
        checksumModifier.seek(chunk.length);
        // read checksum bytes
        checksumModifier.read(checksum);
        // seek back to the chunk end
        checksumModifier.seek(chunk.length);

        // let's modify one byte of the checksum on each iteration
        for (int i = 0; i < checksum.length; i++)
        {
            checksumModifier.write(random.nextInt());
            checksumModifier.getFD().sync(); // making sure that change was synced with disk

            final RandomAccessReader r = CompressedRandomAccessReader.open(file.getPath(), meta);

            Throwable exception = null;
            try
            {
                r.readLine();
            }
            catch (Throwable t)
            {
                exception = t;
            }
            assertNotNull(exception);
            assertEquals(exception.getClass(), CorruptSSTableException.class);
            assertEquals(exception.getCause().getClass(), CorruptBlockException.class);

            r.close();
        }

        // let's write the original checksum back and check that we can read the data
        updateChecksum(checksumModifier, chunk.length, checksum);

        reader = CompressedRandomAccessReader.open(file.getPath(), meta);
        // read and verify compressed data
        assertEquals(CONTENT, reader.readLine());
        // close reader
        reader.close();
    }
    finally
    {
        if (checksumModifier != null)
            checksumModifier.close();
    }
}
 
Developer: vcostet, Project: cassandra-kmean, Lines: 82, Source: CompressedRandomAccessReaderTest.java

Example 9: testDataCorruptionDetection

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testDataCorruptionDetection() throws IOException {
    String CONTENT = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam vitae.";

    File file = new File("testDataCorruptionDetection");
    file.deleteOnExit();

    File metadata = new File(file.getPath() + ".meta");
    metadata.deleteOnExit();

    SSTableMetadata.Collector sstableMetadataCollector = SSTableMetadata.createCollector().replayPosition(null);
    SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), false, new CompressionParameters(SnappyCompressor.instance), sstableMetadataCollector);

    writer.write(CONTENT.getBytes());
    writer.close();

    // open compression metadata and get chunk information
    CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), fs);
    CompressionMetadata.Chunk chunk = meta.chunkFor(0);

    RandomAccessReader reader = CompressedRandomAccessReader.open(new Path(file.getPath()), meta, false, fs);
    // read and verify compressed data
    assertEquals(CONTENT, reader.readLine());
    // close reader
    reader.close();

    Random random = new Random();
    RandomAccessFile checksumModifier = null;

    try {
        checksumModifier = new RandomAccessFile(file, "rw");
        byte[] checksum = new byte[4];

        // seek to the end of the compressed chunk
        checksumModifier.seek(chunk.length);
        // read checksum bytes
        checksumModifier.read(checksum);
        // seek back to the chunk end
        checksumModifier.seek(chunk.length);

        // let's modify one byte of the checksum on each iteration
        for (int i = 0; i < checksum.length; i++) {
            checksumModifier.write(random.nextInt());
            checksumModifier.getFD().sync(); // making sure that change was synced with disk

            final RandomAccessReader r = CompressedRandomAccessReader.open(new Path(file.getPath()), meta, false, fs);

            Throwable exception = null;
            try {
                r.readLine();
            } catch (Throwable t) {
                exception = t;
            }
            assertNotNull(exception);
            assertEquals(exception.getClass(), CorruptSSTableException.class);
            assertEquals(exception.getCause().getClass(), CorruptBlockException.class);

            r.close();
        }

        // let's write the original checksum back and check that we can read the data
        updateChecksum(checksumModifier, chunk.length, checksum);

        reader = CompressedRandomAccessReader.open(new Path(file.getPath()), meta, false, fs);
        // read and verify compressed data
        assertEquals(CONTENT, reader.readLine());
        // close reader
        reader.close();
    } finally {
        if (checksumModifier != null)
            checksumModifier.close();
    }
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 74, Source: CompressedRandomAccessReaderTest.java
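Both testDataCorruptionDetection variants above call an updateChecksum helper whose body is not included in the excerpts. As a hedged sketch only (the helper is defined elsewhere in the test classes), it presumably just rewrites the saved checksum bytes at the chunk boundary and forces the change to disk:

// Hypothetical reconstruction of the updateChecksum helper used above:
// restore the original checksum bytes at the given offset and fsync.
private static void updateChecksum(RandomAccessFile file, long checksumOffset, byte[] checksum) throws IOException
{
    file.seek(checksumOffset);   // jump back to the end of the compressed chunk
    file.write(checksum);        // put the original checksum bytes back
    file.getFD().sync();         // make sure the restore reaches disk
}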

Example 10: testReadOnly

import org.apache.cassandra.io.util.SequentialWriter; // import the package/class this method depends on
@Test
public void testReadOnly() throws IOException {
    SequentialWriter file = createTempFile("brafReadOnlyTest");

    byte[] data = new byte[20];
    for (int i = 0; i < data.length; i++)
        data[i] = 'c';

    file.write(data);
    file.sync(); // flushing file to the disk

    // read-only copy of the file, with fixed file length
    final RandomAccessReader copy = RandomAccessReader.open(new Path(file.getPath()), fs);

    copy.seek(copy.length());
    assertTrue(copy.bytesRemaining() == 0 && copy.isEOF());

    // can't seek past the end of the file for read-only files
    expectException(new Callable<Object>() {
        public Object call() {
            copy.seek(copy.length() + 1);
            return null;
        }
    }, IllegalArgumentException.class);

    // Any write() call should fail
    expectException(new Callable<Object>() {
        public Object call() throws IOException {
            copy.write(1);
            return null;
        }
    }, UnsupportedOperationException.class);

    expectException(new Callable<Object>() {
        public Object call() throws IOException {
            copy.write(new byte[1]);
            return null;
        }
    }, UnsupportedOperationException.class);

    expectException(new Callable<Object>() {
        public Object call() throws IOException {
            copy.write(new byte[3], 0, 2);
            return null;
        }
    }, UnsupportedOperationException.class);

    copy.seek(0);
    copy.skipBytes(5);

    assertEquals(copy.bytesRemaining(), 15);
    assertEquals(copy.getFilePointer(), 5);
    assertTrue(!copy.isEOF());

    copy.seek(0);
    ByteBuffer contents = copy.readBytes((int) copy.length());

    assertEquals(contents.limit(), copy.length());
    assertTrue(ByteBufferUtil.compare(contents, data) == 0);

    copy.seek(0);

    int count = 0;
    while (!copy.isEOF()) {
        assertEquals((byte) copy.read(), 'c');
        count++;
    }

    assertEquals(count, copy.length());

    copy.seek(0);
    byte[] content = new byte[10];
    copy.read(content);

    assertEquals(new String(content), "cccccccccc");

    file.close();
    copy.close();
}
 
Developer: fullcontact, Project: hadoop-sstable, Lines: 80, Source: BufferedRandomAccessFileTest.java


Note: the org.apache.cassandra.io.util.SequentialWriter.write examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms; the snippets were selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; consult each project's License before redistributing or using it. Do not republish without permission.