本文整理汇总了Java中org.apache.cassandra.io.util.FileDataInput类的典型用法代码示例。如果您正苦于以下问题:Java FileDataInput类的具体用法?Java FileDataInput怎么用?Java FileDataInput使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FileDataInput类属于org.apache.cassandra.io.util包,在下文中一共展示了FileDataInput类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: create
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Builds an identity iterator positioned at the given partition in the data file.
 * Seeks to the row, skips the serialized partition key, reads the partition-level
 * deletion, then wraps a simple iterator (optionally tombstones-only).
 *
 * @param sstable       sstable being read
 * @param dfile         open data-file input, will be repositioned
 * @param indexEntry    index entry giving the partition's position in {@code dfile}
 * @param key           decorated partition key
 * @param tombstoneOnly when true, iterate only tombstone information
 * @throws CorruptSSTableException on any IOException (sstable is marked suspect first)
 */
public static SSTableIdentityIterator create(SSTableReader sstable, FileDataInput dfile, RowIndexEntry<?> indexEntry, DecoratedKey key, boolean tombstoneOnly)
{
    try
    {
        dfile.seek(indexEntry.position);
        // The on-disk partition starts with its key; we already have it, so skip.
        ByteBufferUtil.skipShortLength(dfile);
        DeletionTime topLevelDeletion = DeletionTime.serializer.deserialize(dfile);
        SerializationHelper serializationHelper =
            new SerializationHelper(sstable.metadata,
                                    sstable.descriptor.version.correspondingMessagingVersion(),
                                    SerializationHelper.Flag.LOCAL);
        SSTableSimpleIterator contents;
        if (tombstoneOnly)
            contents = SSTableSimpleIterator.createTombstoneOnly(sstable.metadata, dfile, sstable.header, serializationHelper, topLevelDeletion);
        else
            contents = SSTableSimpleIterator.create(sstable.metadata, dfile, sstable.header, serializationHelper, topLevelDeletion);
        return new SSTableIdentityIterator(sstable, key, topLevelDeletion, dfile.getPath(), contents);
    }
    catch (IOException e)
    {
        // Flag the sstable for operator attention before surfacing the corruption.
        sstable.markSuspect();
        throw new CorruptSSTableException(e, dfile.getPath());
    }
}
示例2: deserialize
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Reads the requested metadata components from an sstable metadata file.
 * The file begins with a table of contents (component count, then
 * type-ordinal/offset pairs); each requested component present in the TOC
 * is seeked to and deserialized.
 *
 * @param descriptor sstable descriptor (supplies the serialization version)
 * @param in         metadata file input; position is moved by this call
 * @param types      which component types the caller wants
 * @return map containing only the requested components that were present
 * @throws IOException if an I/O error occurs
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, FileDataInput in, EnumSet<MetadataType> types) throws IOException
{
    // Parse the table of contents: component type -> byte offset in the file.
    int componentCount = in.readInt();
    MetadataType[] allTypes = MetadataType.values();
    Map<MetadataType, Integer> offsets = new EnumMap<>(MetadataType.class);
    for (int i = 0; i < componentCount; i++)
        offsets.put(allTypes[in.readInt()], in.readInt());

    Map<MetadataType, MetadataComponent> result = new EnumMap<>(MetadataType.class);
    for (MetadataType requested : types)
    {
        Integer position = offsets.get(requested);
        if (position == null)
            continue; // component absent from this file; omit from the result
        in.seek(position);
        result.put(requested, requested.serializer.deserialize(descriptor.version, in));
    }
    return result;
}
示例3: SSTableNamesIterator
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Creates an iterator over a named set of columns for one row of an sstable.
 * Immediately reads the row data; an I/O failure marks the sstable suspect
 * and is rethrown as a {@link CorruptSSTableException}.
 *
 * @param sstable    sstable being read
 * @param file       open data-file input positioned for this row
 * @param key        decorated row key
 * @param columns    the column names to fetch; must not be null
 * @param indexEntry index entry locating the row
 */
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<CellName> columns, RowIndexEntry indexEntry)
{
    assert columns != null;
    this.key = key;
    this.columns = columns;
    this.sstable = sstable;
    try
    {
        read(sstable, file, indexEntry);
    }
    catch (IOException e)
    {
        // Let scrub/repair tooling notice the bad file, then report corruption.
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
}
示例4: readSimpleColumns
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Scans the on-disk atoms of the current row, collecting every cell whose name
 * is in {@code columnNames}. Non-cell atoms (e.g. range tombstones) are always
 * collected. Stops early once all requested columns have been found.
 *
 * @param file        data-file input positioned at the row's atoms
 * @param columnNames names of the cells the caller wants
 * @param result      output list the matching atoms are appended to
 */
private void readSimpleColumns(FileDataInput file, SortedSet<CellName> columnNames, List<OnDiskAtom> result)
{
    Iterator<OnDiskAtom> atoms = cf.metadata().getOnDiskIterator(file, sstable.descriptor.version);
    int matched = 0;
    while (atoms.hasNext())
    {
        OnDiskAtom atom = atoms.next();
        if (!(atom instanceof Cell))
        {
            // Atoms that are not cells are retained unconditionally.
            result.add(atom);
            continue;
        }
        if (!columnNames.contains(atom.name()))
            continue;
        result.add(atom);
        // Every requested column has been seen; no need to scan further.
        if (++matched >= columns.size())
            break;
    }
}
示例5: skipIndex
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
* Skip the index
* @param in the data input from which the index should be skipped
* @throws IOException if an I/O error occurs.
*/
/**
 * Skip the serialized column index.
 *
 * @param in the data input from which the index should be skipped
 * @throws IOException if an I/O error occurs, including EOF before the
 *         full index has been skipped.
 */
public static void skipIndex(DataInput in) throws IOException
{
    /* read only the column index list */
    int columnIndexSize = in.readInt();
    /* skip the column index data */
    if (in instanceof FileDataInput)
    {
        FileUtils.skipBytesFully(in, columnIndexSize);
    }
    else
    {
        // Skip without allocating a throwaway buffer of the full index size.
        // DataInput.skipBytes may skip fewer bytes than requested; loop, and
        // fall back to readByte so EOF still raises EOFException (matching the
        // previous readFully-based behavior).
        int remaining = columnIndexSize;
        while (remaining > 0)
        {
            int skipped = in.skipBytes(remaining);
            if (skipped > 0)
            {
                remaining -= skipped;
            }
            else
            {
                in.readByte(); // throws EOFException at end of stream
                remaining--;
            }
        }
    }
}
示例6: deserializeIndex
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
* Deserialize the index into a structure and return it
*
* @param in input source
* @param type the comparator type for the column family
*
* @return ArrayList<IndexInfo> - list of de-serialized indexes
* @throws IOException if an I/O error occurs.
*/
/**
 * Deserialize the column index into a list of {@code IndexInfo} entries.
 *
 * @param in   input source positioned at the serialized index
 * @param type the comparator type for the column family, supplying the
 *             per-entry serializer
 * @return the de-serialized index entries; empty list when no index is present
 * @throws IOException if an I/O error occurs
 */
public static List<IndexInfo> deserializeIndex(FileDataInput in, CType type) throws IOException
{
    int serializedSize = in.readInt();
    if (serializedSize == 0)
        return Collections.<IndexInfo>emptyList();

    ISerializer<IndexInfo> entrySerializer = type.indexSerializer();
    ArrayList<IndexInfo> entries = new ArrayList<IndexInfo>();
    // Consume entries until exactly serializedSize bytes have been read past the mark.
    FileMark start = in.mark();
    while (in.bytesPastMark(start) < serializedSize)
        entries.add(entrySerializer.deserialize(in));
    assert in.bytesPastMark(start) == serializedSize;
    return entries;
}
示例7: deserialize
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Reads the requested metadata components from an sstable metadata file.
 * The file starts with a table of contents (count, then ordinal/offset pairs);
 * each requested type is looked up there and, if present, seeked to and
 * deserialized.
 *
 * Note: unlike some sibling implementations, this one maps absent components
 * to an explicit {@code null} entry rather than omitting the key — callers
 * may rely on that, so the behavior is preserved.
 *
 * @param descriptor sstable descriptor (supplies the serialization version)
 * @param in         metadata file input; position is moved by this call
 * @param types      which component types the caller wants
 * @return map from each requested type to its component, or to null if absent
 * @throws IOException if an I/O error occurs
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, FileDataInput in, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components = Maps.newHashMap();
    // read number of components
    int numComponents = in.readInt();
    // read toc
    Map<MetadataType, Integer> toc = new HashMap<>(numComponents);
    // Hoisted out of the loop: values() allocates a fresh array on each call.
    MetadataType[] values = MetadataType.values();
    for (int i = 0; i < numComponents; i++)
    {
        toc.put(values[in.readInt()], in.readInt());
    }
    for (MetadataType type : types)
    {
        MetadataComponent component = null;
        if (toc.containsKey(type))
        {
            in.seek(toc.get(type));
            component = type.serializer.deserialize(descriptor.version, in);
        }
        components.put(type, component);
    }
    return components;
}
示例8: setToRowStart
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
* Sets the seek position to the start of the row for column scanning.
*/
/**
 * Sets the seek position to the start of the row for column scanning.
 * After this call {@code this.file} is positioned just past the row header
 * (key, and — on older formats — the row size/column count long).
 *
 * @param rowEntry index entry giving the row's position in the data file
 * @param in       an already-open data-file input to reuse, or null to open one
 * @throws IOException if seeking or reading the row header fails
 */
private void setToRowStart(RowIndexEntry rowEntry, FileDataInput in) throws IOException
{
    if (in == null)
    {
        // No input supplied: open the data file directly at the row position.
        this.file = sstable.getFileDataInput(rowEntry.position);
    }
    else
    {
        // Reuse the caller's input, repositioned to the row start.
        this.file = in;
        in.seek(rowEntry.position);
    }
    // Consume the serialized key (value discarded; we only need to advance past it).
    sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(file));
    // Older sstable versions store a row-size/column-count long here; skip it.
    if (sstable.descriptor.version.hasRowSizeAndColumnCount)
        file.readLong();
}
示例9: SSTableNamesIterator
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Creates an iterator over a named set of columns for one row of an sstable.
 * The row is read eagerly in the constructor; any I/O failure marks the
 * sstable as suspect and is wrapped in a {@link CorruptSSTableException}.
 *
 * @param sstable    sstable being read
 * @param file       open data-file input positioned for this row
 * @param key        decorated row key
 * @param columns    the column names to fetch (non-null)
 * @param indexEntry index entry locating the row
 */
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
{
    assert columns != null;
    this.sstable = sstable;
    this.key = key;
    this.columns = columns;
    try
    {
        read(sstable, file, indexEntry);
    }
    catch (IOException ioe)
    {
        // Record the file as suspect before propagating the corruption error.
        sstable.markSuspect();
        throw new CorruptSSTableException(ioe, sstable.getFilename());
    }
}
示例10: readSimpleColumns
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Walks the row's on-disk atoms, appending every column whose name appears in
 * {@code columnNames}. Atoms that are not columns are appended unconditionally.
 * Exits early as soon as all requested columns have been collected.
 *
 * @param file        data-file input positioned at the row's atoms
 * @param columnNames names of the columns the caller wants
 * @param result      output list the matching atoms are appended to
 * @param columnCount number of columns stored for this row
 */
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<OnDiskAtom> result, int columnCount)
{
    int found = 0;
    for (Iterator<OnDiskAtom> it = cf.metadata().getOnDiskIterator(file, columnCount, sstable.descriptor.version); it.hasNext(); )
    {
        OnDiskAtom atom = it.next();
        if (atom instanceof Column)
        {
            if (!columnNames.contains(atom.name()))
                continue;
            result.add(atom);
            if (++found >= columns.size())
                break; // all requested columns located
        }
        else
        {
            // Non-column atoms are always kept.
            result.add(atom);
        }
    }
}
示例11: deserializeIndex
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
* Deserialize the index into a structure and return it
*
* @param in - input source
*
* @return ArrayList<IndexInfo> - list of de-serialized indexes
* @throws IOException if an I/O error occurs.
*/
/**
 * Deserialize the column index into a list of {@code IndexInfo} entries.
 *
 * @param in input source positioned at the serialized index
 * @return the de-serialized index entries; empty list when no index is present
 * @throws IOException if an I/O error occurs
 */
public static List<IndexInfo> deserializeIndex(FileDataInput in) throws IOException
{
    int serializedSize = in.readInt();
    if (serializedSize == 0)
        return Collections.<IndexInfo>emptyList();

    ArrayList<IndexInfo> entries = new ArrayList<IndexInfo>();
    // Read entries until exactly serializedSize bytes have been consumed past the mark.
    FileMark start = in.mark();
    while (in.bytesPastMark(start) < serializedSize)
        entries.add(IndexInfo.deserialize(in));
    assert in.bytesPastMark(start) == serializedSize;
    return entries;
}
示例12: deserialize
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Reads the requested metadata components from an sstable metadata file.
 * The file opens with a table of contents (count, then type-ordinal/offset
 * pairs); each requested component present in the TOC is seeked to and
 * deserialized. Absent components are omitted from the returned map.
 *
 * @param descriptor sstable descriptor (supplies the serialization version)
 * @param in         metadata file input; position is moved by this call
 * @param types      which component types the caller wants
 * @return map containing only the requested components that were present
 * @throws IOException if an I/O error occurs
 */
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, FileDataInput in, EnumSet<MetadataType> types) throws IOException
{
    int componentCount = in.readInt();
    // Table of contents: component type -> byte offset within the file.
    Map<MetadataType, Integer> offsets = new HashMap<>(componentCount);
    MetadataType[] allTypes = MetadataType.values();
    for (int i = 0; i < componentCount; i++)
        offsets.put(allTypes[in.readInt()], in.readInt());

    Map<MetadataType, MetadataComponent> result = Maps.newHashMap();
    for (MetadataType requested : types)
    {
        Integer position = offsets.get(requested);
        if (position == null)
            continue; // not stored in this file
        in.seek(position);
        result.put(requested, requested.serializer.deserialize(descriptor.version, in));
    }
    return result;
}
示例13: readSimpleColumns
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Iterates the row's on-disk atoms, collecting every cell named in
 * {@code columnNames}; atoms that are not cells are collected unconditionally.
 * Stops as soon as every requested column has been found.
 *
 * @param file        data-file input positioned at the row's atoms
 * @param columnNames names of the cells the caller wants
 * @param result      output list the matching atoms are appended to
 * @param columnCount number of columns stored for this row
 */
private void readSimpleColumns(FileDataInput file, SortedSet<CellName> columnNames, List<OnDiskAtom> result, int columnCount)
{
    Iterator<OnDiskAtom> atoms = cf.metadata().getOnDiskIterator(file, columnCount, sstable.descriptor.version);
    int matched = 0;
    while (atoms.hasNext())
    {
        OnDiskAtom atom = atoms.next();
        if (!(atom instanceof Cell))
        {
            // Non-cell atoms (e.g. range tombstones) are retained unconditionally.
            result.add(atom);
            continue;
        }
        if (!columnNames.contains(atom.name()))
            continue;
        result.add(atom);
        if (++matched >= columns.size())
            break; // all requested columns located
    }
}
示例14: readSimpleColumns
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Reads the row's serialized column count, then deserializes that many atoms,
 * collecting every column named in {@code columnNames}; non-column atoms are
 * collected unconditionally. Exits early once all requested columns are found.
 *
 * @param file        data-file input positioned at the row's column count
 * @param columnNames names of the columns the caller wants
 * @param result      output list the matching atoms are appended to
 * @throws IOException if an I/O error occurs while reading
 */
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<OnDiskAtom> result) throws IOException
{
    OnDiskAtom.Serializer serializer = cf.getOnDiskSerializer();
    int total = file.readInt();
    int found = 0;
    for (int i = 0; i < total; i++)
    {
        OnDiskAtom atom = serializer.deserializeFromSSTable(file, sstable.descriptor.version);
        if (!(atom instanceof IColumn))
        {
            // Non-column atoms are always kept.
            result.add(atom);
            continue;
        }
        if (!columnNames.contains(atom.name()))
            continue;
        result.add(atom);
        if (++found >= columns.size())
            break; // all requested columns located
    }
}
示例15: bytesReadForUnconsumedData
import org.apache.cassandra.io.util.FileDataInput; //导入依赖的package包/类
/**
 * Returns how many bytes have been read past the last fully-consumed position.
 * Only meaningful when the underlying input is file-backed, since position
 * tracking requires a {@link FileDataInput}.
 *
 * @return bytes read beyond {@code lastConsumedPosition}
 * @throws AssertionError if the underlying input is not a FileDataInput
 */
public long bytesReadForUnconsumedData()
{
    if (!(in instanceof FileDataInput))
        // Message added so the failure is diagnosable instead of a bare AssertionError.
        throw new AssertionError("unconsumed-data accounting requires a FileDataInput, but got: " + in.getClass().getName());
    return currentPosition() - lastConsumedPosition;
}