This article collects typical usage examples of the Java method org.apache.cassandra.io.util.FileDataInput.readInt. If you are unsure what FileDataInput.readInt does or how to use it in your own code, the curated samples below may help; you can also explore the other methods of the org.apache.cassandra.io.util.FileDataInput class.
The following shows 4 code examples of FileDataInput.readInt, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
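Before the examples, here is a minimal, self-contained sketch of the pattern they all share: readInt pulls a 4-byte big-endian value off the underlying file, typically a length prefix or an offset for whatever structure follows. The helper name and the length-prefixed layout below are illustrative assumptions, not part of the Cassandra API.

import java.io.IOException;
import org.apache.cassandra.io.util.FileDataInput;

public class ReadIntSketch
{
    // Hypothetical helper: reads a length-prefixed array of ints from a FileDataInput.
    // The layout (a count followed by that many ints) mirrors the TOC handling in the
    // examples below; it is not an actual Cassandra on-disk format.
    public static int[] readIntArray(FileDataInput in) throws IOException
    {
        int count = in.readInt();          // length prefix
        int[] values = new int[count];
        for (int i = 0; i < count; i++)
            values[i] = in.readInt();      // one 4-byte big-endian int per entry
        return values;
    }
}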
Example 1: deserialize
import org.apache.cassandra.io.util.FileDataInput; // import the package/class this method depends on
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, FileDataInput in, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components = new EnumMap<>(MetadataType.class);
    // read number of components
    int numComponents = in.readInt();
    // read toc
    Map<MetadataType, Integer> toc = new EnumMap<>(MetadataType.class);
    MetadataType[] values = MetadataType.values();
    for (int i = 0; i < numComponents; i++)
    {
        toc.put(values[in.readInt()], in.readInt());
    }
    for (MetadataType type : types)
    {
        Integer offset = toc.get(type);
        if (offset != null)
        {
            in.seek(offset);
            MetadataComponent component = type.serializer.deserialize(descriptor.version, in);
            components.put(type, component);
        }
    }
    return components;
}
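A hedged usage sketch for the method above: open the sstable's statistics component with a RandomAccessReader (which implements FileDataInput) and request only the STATS metadata. The file path is hypothetical, and names such as Descriptor.fromFilename, Component.STATS and RandomAccessReader.open follow the Cassandra 2.x-era API; exact signatures vary across versions.

// Hypothetical usage; the path and the reader factory are assumptions, not taken from the article.
Descriptor descriptor = Descriptor.fromFilename("/var/lib/cassandra/data/ks/cf/ks-cf-ka-1-Data.db");
try (RandomAccessReader in = RandomAccessReader.open(new File(descriptor.filenameFor(Component.STATS))))
{
    Map<MetadataType, MetadataComponent> components =
        deserialize(descriptor, in, EnumSet.of(MetadataType.STATS));
    MetadataComponent stats = components.get(MetadataType.STATS);
}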
Example 2: deserialize
import org.apache.cassandra.io.util.FileDataInput; // import the package/class this method depends on
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, FileDataInput in, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components = Maps.newHashMap();
    // read number of components
    int numComponents = in.readInt();
    // read toc
    Map<MetadataType, Integer> toc = new HashMap<>(numComponents);
    for (int i = 0; i < numComponents; i++)
    {
        toc.put(MetadataType.values()[in.readInt()], in.readInt());
    }
    for (MetadataType type : types)
    {
        MetadataComponent component = null;
        if (toc.containsKey(type))
        {
            in.seek(toc.get(type));
            component = type.serializer.deserialize(descriptor.version, in);
        }
        components.put(type, component);
    }
    return components;
}
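Note the design difference from Example 1: this variant puts an entry into the result map for every requested type and leaves the value null when the component is missing from the table of contents, so callers must be prepared to handle null values; Example 1 simply omits absent types from the map.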
Example 3: deserialize
import org.apache.cassandra.io.util.FileDataInput; // import the package/class this method depends on
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, FileDataInput in, EnumSet<MetadataType> types) throws IOException
{
    Map<MetadataType, MetadataComponent> components = Maps.newHashMap();
    // read number of components
    int numComponents = in.readInt();
    // read toc
    Map<MetadataType, Integer> toc = new HashMap<>(numComponents);
    MetadataType[] values = MetadataType.values();
    for (int i = 0; i < numComponents; i++)
    {
        toc.put(values[in.readInt()], in.readInt());
    }
    for (MetadataType type : types)
    {
        Integer offset = toc.get(type);
        if (offset != null)
        {
            in.seek(offset);
            MetadataComponent component = type.serializer.deserialize(descriptor.version, in);
            components.put(type, component);
        }
    }
    return components;
}
Example 4: readSimpleColumns
import org.apache.cassandra.io.util.FileDataInput; // import the package/class this method depends on
private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<OnDiskAtom> result) throws IOException
{
    OnDiskAtom.Serializer atomSerializer = cf.getOnDiskSerializer();
    // number of atoms serialized for this row
    int count = file.readInt();
    int n = 0;
    for (int i = 0; i < count; i++)
    {
        OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
        if (column instanceof IColumn)
        {
            // keep only the requested columns and stop once all of them have been found
            if (columnNames.contains(column.name()))
            {
                result.add(column);
                if (++n >= columns.size())
                    break;
            }
        }
        else
        {
            // non-column atoms (e.g. range tombstones) are always kept
            result.add(column);
        }
    }
}
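Here readInt supplies the column count that was written ahead of the serialized columns; the loop then deserializes exactly that many atoms from the FileDataInput and breaks out early once every requested column name has been matched. The snippet depends on instance fields of its enclosing reader class (cf, sstable, columns) and on the older IColumn interface, so it illustrates the readInt pattern rather than serving as a standalone method.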