本文整理汇总了Java中org.apache.cassandra.io.util.SequentialWriter类的典型用法代码示例。如果您正苦于以下问题:Java SequentialWriter类的具体用法?Java SequentialWriter怎么用?Java SequentialWriter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
SequentialWriter类属于org.apache.cassandra.io.util包,在下文中一共展示了SequentialWriter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: flushAndClear
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
@Override
public void flushAndClear(SequentialWriter out) throws IOException
{
    // Let the superclass flush its own state first, then record where the
    // sparse index starts; -1 signals that no sparse terms were collected.
    super.flushAndClear(out);
    out.writeInt((sparseValueTerms == 0) ? -1 : offset);

    // Emit every pending token container (a no-op when none are queued).
    for (TokenTreeBuilder tokens : containers)
        tokens.write(out);

    if (sparseValueTerms > 0)
        combinedIndex.finish().write(out);

    alignToBlock(out);

    // Reset the per-block accumulation state for the next flush cycle.
    containers.clear();
    combinedIndex = new TokenTreeBuilder();
    offset = 0;
    sparseValueTerms = 0;
}
示例2: testSerializedSize
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
@Test
public void testSerializedSize() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(tokens).finish();

    final File treeFile = File.createTempFile("token-tree-size-test", "tt");
    treeFile.deleteOnExit();

    // Close the writer even if write() throws, so the temp file handle is released.
    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    try
    {
        builder.write(writer);
    }
    finally
    {
        writer.close();
    }

    // The bytes actually on disk must match what serializedSize() predicted.
    // Fix: the original never closed this reader, leaking a file descriptor.
    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    try
    {
        Assert.assertEquals((int) reader.bytesRemaining(), builder.serializedSize());
    }
    finally
    {
        reader.close();
    }
}
示例3: skipPastEnd
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
@Test
public void skipPastEnd() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();

    final File treeFile = File.createTempFile("token-tree-skip-past-test", "tt");
    treeFile.deleteOnExit();

    // Close the writer even if write() throws.
    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    try
    {
        builder.write(writer);
    }
    finally
    {
        writer.close();
    }

    // Fix: the original never closed this reader, leaking a file descriptor.
    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    try
    {
        // Skipping beyond the largest stored token must be a safe no-op
        // (the test passes as long as no exception is thrown).
        final RangeIterator<Long, Token> tokenTree = new TokenTree(new MappedBuffer(reader)).iterator(KEY_CONVERTER);
        tokenTree.skipTo(simpleTokenMap.lastKey() + 10);
    }
    finally
    {
        reader.close();
    }
}
示例4: writeMetadata
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Serializes the given metadata components into the sstable's STATS component file.
 *
 * @param desc       descriptor identifying the sstable the metadata belongs to
 * @param components metadata components to persist, keyed by their type
 * @throws FSWriteError wrapping any {@link IOException} raised while writing
 */
private static void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components)
{
    // try-with-resources guarantees the writer is closed on every path,
    // replacing the hand-written try/finally.
    try (SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(Component.STATS))))
    {
        desc.getMetadataSerializer().serialize(components, out.stream);
    }
    catch (IOException e)
    {
        // Derive the path from the descriptor since `out` is out of scope here;
        // this matches the error-reporting style of the other writeMetadata variant.
        throw new FSWriteError(e, desc.filenameFor(Component.STATS));
    }
}
示例5: testGetFilePointer
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
@Test
public void testGetFilePointer() throws IOException {
    final SequentialWriter w = createTempFile("brafGetFilePointer");
    RandomAccessReader r = null;
    try {
        // JUnit convention: expected value first, actual second — the original
        // had them reversed, producing misleading failure messages.
        assertEquals(0, w.getFilePointer()); // initial position should be 0

        w.write(generateByteArray(20));
        assertEquals(20, w.getFilePointer()); // position 20 after writing 20 bytes

        w.sync();
        r = RandomAccessReader.open(w, fs);

        // position should change after skipping bytes
        r.seek(0);
        r.skipBytes(15);
        assertEquals(15, r.getFilePointer());

        r.read();
        assertEquals(16, r.getFilePointer());
        r.read(new byte[4]);
        assertEquals(20, r.getFilePointer());
    } finally {
        // Fix: close both handles even when an assertion fails, so the test
        // does not leak file descriptors across the suite.
        w.close();
        if (r != null)
            r.close();
    }
}
示例6: testBytesRemaining
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
@Test
public void testBytesRemaining() throws IOException {
    SequentialWriter w = createTempFile("brafBytesRemaining");
    RandomAccessReader r = null;
    try {
        // Write slightly more than one default buffer so remaining-byte
        // accounting is exercised across a buffer boundary.
        int toWrite = RandomAccessReader.DEFAULT_BUFFER_SIZE + 10;
        w.write(generateByteArray(toWrite));
        w.sync();

        r = RandomAccessReader.open(w, fs);
        // JUnit convention: expected first, actual second (original was reversed).
        assertEquals(toWrite, r.bytesRemaining());

        // bytesRemaining() must decrease by exactly one per byte consumed.
        for (int i = 1; i <= r.length(); i++) {
            r.read();
            assertEquals(r.length() - i, r.bytesRemaining());
        }

        r.seek(0);
        r.skipBytes(10);
        assertEquals(r.length() - 10, r.bytesRemaining());
    } finally {
        // Fix: close both handles even when an assertion fails mid-test.
        w.close();
        if (r != null)
            r.close();
    }
}
示例7: ColumnIndex
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Creates a ColumnIndex that will write index entries through the given writer.
 *
 * @param header              serialization header for the sstable being written
 * @param writer              destination for index output
 * @param version             sstable format version; only its corresponding
 *                            messaging version is retained (see below)
 * @param observers           flush observers to notify during writing
 * @param indexInfoSerializer serializer used for individual {@code IndexInfo} entries
 */
public ColumnIndex(SerializationHeader header,
                   SequentialWriter writer,
                   Version version,
                   Collection<SSTableFlushObserver> observers,
                   ISerializer<IndexInfo> indexInfoSerializer)
{
    this.header = header;
    this.writer = writer;
    // Only the messaging protocol version derived from the sstable version is kept.
    this.version = version.correspondingMessagingVersion();
    this.observers = observers;
    this.idxSerializer = indexInfoSerializer;
}
示例8: writeMetadata
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Persists the given metadata components to the sstable's STATS component.
 * Any I/O failure is rethrown as an {@code FSWriteError} carrying the file path.
 */
private void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components)
{
    final String statsPath = desc.filenameFor(Component.STATS);

    // The writer is auto-closed; finish() completes the write before close.
    try (SequentialWriter writer = new SequentialWriter(statsPath, writerOption, desc.getConfiguration()))
    {
        desc.getMetadataSerializer().serialize(components, writer, desc.version);
        writer.finish();
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, statsPath);
    }
}
示例9: IndexWriter
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Prepares the on-disk primary-index machinery for an sstable expected to
 * hold roughly {@code keyCount} keys: the index writer itself, a file-handle
 * builder, an index summary, and a bloom filter.
 */
IndexWriter(long keyCount)
{
    // Sequential writer for the PRIMARY_INDEX component.
    indexFile = new SequentialWriter(descriptor.filenameFor(Component.PRIMARY_INDEX),
                                     writerOption, descriptor.getConfiguration());
    // Handle builder for later reads of the same index file.
    builder = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX))
                            .withConfiguration(descriptor.getConfiguration());
    //chunkCache.ifPresent(builder::withChunkCache);
    // Summary sampling is governed by the table's min index interval.
    summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL);
    // Bloom filter sized from the key estimate and configured false-positive chance;
    // hasOldBfHashOrder() preserves hash-order compatibility with older sstable versions.
    bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true,
                                 descriptor.version.hasOldBfHashOrder());
    // register listeners to be alerted when the data files are flushed,
    // so the summary tracks how far the index/data files have been synced
    indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset()));
    dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset()));
}
示例10: open
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Factory for a compressed sequential writer: compressed data is written to
 * {@code dataFilePath} and the chunk-offset index to {@code indexFilePath}.
 */
public static SequentialWriter open(String dataFilePath,
                                    String indexFilePath,
                                    boolean skipIOCache,
                                    CompressionParameters parameters,
                                    Collector sstableMetadataCollector)
{
    final File dataFile = new File(dataFilePath);
    return new CompressedSequentialWriter(dataFile,
                                          indexFilePath,
                                          skipIOCache,
                                          parameters,
                                          sstableMetadataCollector);
}
示例11: addTerm
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Adds a data term; when the data level overflows, the resulting pointer term
 * is propagated upward through the index levels until one absorbs it.
 */
private void addTerm(InMemoryDataTerm term, SequentialWriter out) throws IOException
{
    InMemoryPointerTerm ptr = dataLevel.add(term);
    if (ptr == null)
        return; // the data level absorbed the term without overflowing

    // Push the promoted pointer up one level at a time; each overflow
    // yields a new pointer for the level above.
    int levelIdx = 0;
    do
    {
        ptr = getIndexLevel(levelIdx++, out).add(ptr);
    }
    while (ptr != null);
}
示例12: getIndexLevel
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Returns the index level at {@code idx}, lazily creating any missing levels
 * (each backed by {@code out}) along the way.
 */
private MutableLevel<InMemoryPointerTerm> getIndexLevel(int idx, SequentialWriter out)
{
    // Grow the level list until the requested index exists; this single loop
    // is equivalent to the original's separate empty-list and gap-fill branches.
    while (levels.size() <= idx)
        levels.add(new MutableLevel<>(out, new MutableBlock<InMemoryPointerTerm>()));

    return levels.get(idx);
}
示例13: buildSerializeAndIterate
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
@Test
public void buildSerializeAndIterate() throws Exception
{
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();

    final File treeFile = File.createTempFile("token-tree-iterate-test1", "tt");
    treeFile.deleteOnExit();

    // Close the writer even if write() throws.
    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    try
    {
        builder.write(writer);
    }
    finally
    {
        writer.close();
    }

    // Fix: the original only closed the reader on the success path, leaking
    // a file descriptor whenever an assertion failed.
    final RandomAccessReader reader = RandomAccessReader.open(treeFile);
    try
    {
        final TokenTree tokenTree = new TokenTree(new MappedBuffer(reader));
        final Iterator<Token> tokenIterator = tokenTree.iterator(KEY_CONVERTER);
        final Iterator<Map.Entry<Long, LongSet>> listIterator = simpleTokenMap.entrySet().iterator();

        // The deserialized tree must yield exactly the source map's entries,
        // in the same order.
        while (tokenIterator.hasNext() && listIterator.hasNext())
        {
            final Token treeNext = tokenIterator.next();
            final Map.Entry<Long, LongSet> listNext = listIterator.next();

            Assert.assertEquals(listNext.getKey(), treeNext.get());
            Assert.assertEquals(convert(listNext.getValue()), convert(treeNext));
        }

        Assert.assertFalse("token iterator not finished", tokenIterator.hasNext());
        Assert.assertFalse("list iterator not finished", listIterator.hasNext());
    }
    finally
    {
        reader.close();
    }
}
示例14: generateTree
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Builds a TokenTree containing one entry per token in [minToken, maxToken],
 * where each token maps to a single offset equal to the token itself.
 *
 * @param minToken first token (inclusive)
 * @param maxToken last token (inclusive)
 * @return a TokenTree deserialized from a freshly written temp file
 * @throws IOException if writing or opening the temp file fails
 */
private static TokenTree generateTree(final long minToken, final long maxToken) throws IOException
{
    // Populate a plain TreeMap instead of the original double-brace
    // initialization (an anonymous subclass per call — a known anti-pattern).
    final SortedMap<Long, LongSet> toks = new TreeMap<>();
    for (long i = minToken; i <= maxToken; i++)
    {
        final LongSet offsetSet = new LongOpenHashSet();
        offsetSet.add(i);
        toks.put(i, offsetSet);
    }

    final TokenTreeBuilder builder = new TokenTreeBuilder(toks).finish();
    final File treeFile = File.createTempFile("token-tree-get-test", "tt");
    treeFile.deleteOnExit();

    // Fix: close the writer even if write() throws.
    final SequentialWriter writer = new SequentialWriter(treeFile, 4096, false);
    try
    {
        builder.write(writer);
    }
    finally
    {
        writer.close();
    }

    RandomAccessReader reader = null;
    try
    {
        reader = RandomAccessReader.open(treeFile);
        return new TokenTree(new MappedBuffer(reader));
    }
    finally
    {
        // NOTE(review): mirrors the original — the reader is closed while the
        // returned TokenTree still wraps its MappedBuffer; presumably the
        // mapping stays valid after close. Confirm against MappedBuffer.
        FileUtils.closeQuietly(reader);
    }
}
示例15: writeAndBuildIndex
import org.apache.cassandra.io.util.SequentialWriter; //导入依赖的package包/类
/**
 * Writes the given partition through an index Builder and returns the
 * resulting ColumnIndex.
 */
public static ColumnIndex writeAndBuildIndex(UnfilteredRowIterator iterator, SequentialWriter output, SerializationHeader header, Version version) throws IOException
{
    // Precondition: partitions must be non-empty and the format must store rows.
    assert !iterator.isEmpty() && version.storeRows();

    final int messagingVersion = version.correspondingMessagingVersion();
    final Builder builder = new Builder(iterator, output, header, messagingVersion);
    return builder.build();
}