This article collects typical usage examples of the Java class org.apache.cassandra.io.util.FileMark. If you are wondering what FileMark does and how to use it, the curated code examples below may help.
The FileMark class belongs to the org.apache.cassandra.io.util package. Twelve code examples of the class are shown below, sorted by popularity by default.
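Before the harvested examples, here is a minimal sketch of the mark/reset pattern that FileMark enables. It assumes the Cassandra 2.x factory methods SequentialWriter.open(File) and RandomAccessReader.open(File); exact signatures vary between Cassandra versions (Example 2 below, for instance, opens its reader through a filesystem handle), so treat this as an orientation sketch rather than a copy of any listed project.

import java.io.File;
import java.io.IOException;

import org.apache.cassandra.io.util.FileMark;
import org.apache.cassandra.io.util.RandomAccessReader;
import org.apache.cassandra.io.util.SequentialWriter;

public class FileMarkSketch
{
    public static void main(String[] args) throws IOException
    {
        // write 30 bytes so there is something to read back
        File f = File.createTempFile("filemark_sketch", ".db");
        SequentialWriter writer = SequentialWriter.open(f);
        writer.write(new byte[30]);
        writer.close();

        RandomAccessReader reader = RandomAccessReader.open(f);
        reader.seek(10);
        FileMark mark = reader.mark();          // remember position 10

        reader.seek(reader.length());           // read ahead to EOF
        reader.reset(mark);                     // rewind to the mark

        // run with -ea to enable these checks
        assert reader.bytesRemaining() == 20;   // 30 total, 10 behind the mark
        assert reader.bytesPastMark(mark) == 0; // nothing read past the mark yet
        reader.close();

        f.delete();
    }
}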
Example 1: test6791
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Test
public void test6791() throws IOException, ConfigurationException
{
    File f = File.createTempFile("compressed6791_", "3");
    String filename = f.getAbsolutePath();
    try
    {
        MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance));
        CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", new CompressionParameters(SnappyCompressor.instance, 32, Collections.<String, String>emptyMap()), sstableMetadataCollector);

        for (int i = 0; i < 20; i++)
            writer.write("x".getBytes());

        FileMark mark = writer.mark();
        // write enough garbage to create new chunks:
        for (int i = 0; i < 40; ++i)
            writer.write("y".getBytes());

        writer.resetAndTruncate(mark);

        for (int i = 0; i < 20; i++)
            writer.write("x".getBytes());
        writer.close();

        CompressedRandomAccessReader reader = CompressedRandomAccessReader.open(filename, new CompressionMetadata(filename + ".metadata", f.length(), true));
        String res = reader.readLine();
        assertEquals(res, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
        assertEquals(40, res.length());
    }
    finally
    {
        // cleanup
        if (f.exists())
            f.delete();
        File metadata = new File(filename + ".metadata");
        if (metadata.exists())
            metadata.delete();
    }
}
Example 2: testMarkAndReset
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Test
public void testMarkAndReset() throws IOException {
    SequentialWriter w = createTempFile("brafTestMark");
    w.write(new byte[30]);
    w.close();

    RandomAccessReader file = RandomAccessReader.open(w, fs);
    file.seek(10);
    FileMark mark = file.mark();

    file.seek(file.length());
    assertTrue(file.isEOF());

    file.reset();
    assertEquals(file.bytesRemaining(), 20);

    file.seek(file.length());
    assertTrue(file.isEOF());

    file.reset(mark);
    assertEquals(file.bytesRemaining(), 20);

    file.seek(file.length());
    assertEquals(file.bytesPastMark(), 20);
    assertEquals(file.bytesPastMark(mark), 20);

    file.reset(mark);
    assertEquals(file.bytesPastMark(), 0);

    file.close();
}
Example 3: mark
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public FileMark mark()
{
    return new CompressedFileWriterMark(chunkOffset, current, validBufferBytes, chunkCount + 1);
}
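The CompressedFileWriterMark returned here is a private value object inside CompressedSequentialWriter. The sketch below is an assumption reconstructed from this example's constructor call and from the fields the resetAndTruncate examples read back (chunkOffset, uncDataOffset, bufferOffset, nextChunkIndex); it is not copied from any of the listed projects.

// Sketch (assumed layout): a compressed-writer mark captures everything needed
// to rewind: the chunk's file offset, the uncompressed position, the number of
// valid buffered bytes, and the index of the next chunk to be written.
protected static class CompressedFileWriterMark implements FileMark
{
    final long chunkOffset;    // offset of the current compressed chunk on disk
    final long uncDataOffset;  // writer's logical (uncompressed) position
    final int bufferOffset;    // valid bytes in the in-memory buffer at mark time
    final int nextChunkIndex;  // index of the chunk that would be written next

    CompressedFileWriterMark(long chunkOffset, long uncDataOffset, int bufferOffset, int nextChunkIndex)
    {
        this.chunkOffset = chunkOffset;
        this.uncDataOffset = uncDataOffset;
        this.bufferOffset = bufferOffset;
        this.nextChunkIndex = nextChunkIndex;
    }
}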
Example 4: resetAndTruncate
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        try
        {
            // repopulate buffer
            compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        Checksum checksum = new Adler32();
        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // mark as dirty so we don't lose the bytes on subsequent reBuffer calls
    isDirty = true;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
Example 5: resetAndTruncate
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        int validBytes;
        try
        {
            // decompress data chunk and store its length
            validBytes = compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex);
}
Example 6: resetAndTruncate
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        try
        {
            // repopulate buffer
            compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        Checksum checksum = new Adler32();
        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
Example 7: resetAndTruncate
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        int validBytes;
        try
        {
            // decompress data chunk and store its length
            validBytes = compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(buffer, 0, validBytes);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex);
}
Example 8: resetAndTruncate
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        try
        {
            // repopulate buffer
            compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(compressed.buffer, 0, chunkSize);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex);
}
Example 9: resetAndTruncate
import org.apache.cassandra.io.util.FileMark; // import the required package/class
@Override
public synchronized void resetAndTruncate(FileMark mark)
{
    assert mark instanceof CompressedFileWriterMark;
    CompressedFileWriterMark realMark = (CompressedFileWriterMark) mark;

    // reset position
    current = realMark.uncDataOffset;

    if (realMark.chunkOffset == chunkOffset) // current buffer
    {
        // just reset a buffer offset and return
        validBufferBytes = realMark.bufferOffset;
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // setting marker as a current offset
    chunkOffset = realMark.chunkOffset;

    // compressed chunk size (- 4 bytes reserved for checksum)
    int chunkSize = (int) (metadataWriter.chunkOffsetBy(realMark.nextChunkIndex) - chunkOffset - 4);
    if (compressed.buffer.length < chunkSize)
        compressed.buffer = new byte[chunkSize];

    try
    {
        out.seek(chunkOffset);
        out.readFully(compressed.buffer, 0, chunkSize);

        int validBytes;
        try
        {
            // decompress data chunk and store its length
            validBytes = compressor.uncompress(compressed.buffer, 0, chunkSize, buffer, 0);
        }
        catch (IOException e)
        {
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
        }

        checksum.update(buffer, 0, validBytes);

        if (out.readInt() != (int) checksum.getValue())
            throw new CorruptBlockException(getPath(), chunkOffset, chunkSize);
    }
    catch (CorruptBlockException e)
    {
        throw new CorruptSSTableException(e, getPath());
    }
    catch (EOFException e)
    {
        throw new CorruptSSTableException(new CorruptBlockException(getPath(), chunkOffset, chunkSize), getPath());
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    checksum.reset();

    // reset buffer
    validBufferBytes = realMark.bufferOffset;
    bufferOffset = current - validBufferBytes;
    chunkCount = realMark.nextChunkIndex - 1;

    // truncate data and index file
    truncate(chunkOffset);
    metadataWriter.resetAndTruncate(realMark.nextChunkIndex - 1);
}
Example 10: mark
import org.apache.cassandra.io.util.FileMark; // import the required package/class
public FileMark mark() {
    markedPointer = current;
    return new BufferedRandomAccessFileMark(markedPointer);
}
Example 11: reset
import org.apache.cassandra.io.util.FileMark; // import the required package/class
public void reset(FileMark mark) {
    assert mark instanceof BufferedRandomAccessFileMark;
    seek(((BufferedRandomAccessFileMark) mark).pointer);
}
Example 12: bytesPastMark
import org.apache.cassandra.io.util.FileMark; // import the required package/class
public long bytesPastMark(FileMark mark) {
    assert mark instanceof BufferedRandomAccessFileMark;
    long bytes = current - ((BufferedRandomAccessFileMark) mark).pointer;
    assert bytes >= 0;
    return bytes;
}
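Examples 10 through 12 all cast their argument to BufferedRandomAccessFileMark. A reader-side mark needs nothing beyond the absolute file position, so a minimal sketch consistent with those casts looks like this (the real class is a private inner type; this layout is an assumption):

// Sketch (assumed layout): the reader-side mark just captures the
// absolute file position that mark() saved.
protected static class BufferedRandomAccessFileMark implements FileMark
{
    final long pointer; // absolute position recorded by mark()

    BufferedRandomAccessFileMark(long pointer)
    {
        this.pointer = pointer;
    }
}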