本文整理汇总了Java中org.apache.cassandra.utils.FBUtilities.copy方法的典型用法代码示例。如果您正苦于以下问题:Java FBUtilities.copy方法的具体用法?Java FBUtilities.copy怎么用?Java FBUtilities.copy使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.utils.FBUtilities
的用法示例。
在下文中一共展示了FBUtilities.copy方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: serialize
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
public void serialize(IndexSummary t, DataOutputStream out) throws IOException
{
    // Header first: sampling interval, entry count, then the byte length of
    // the off-heap summary region so deserialize() knows how much to read.
    out.writeInt(t.indexInterval);
    out.writeInt(t.summary_size);
    long byteCount = t.bytes.size();
    out.writeLong(byteCount);
    // Stream the off-heap summary memory straight into the output.
    FBUtilities.copy(new MemoryInputStream(t.bytes), out, byteCount);
}
示例2: deserialize
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
public IndexSummary deserialize(DataInputStream in, IPartitioner partitioner) throws IOException
{
    // Mirror of serialize(): interval, entry count, then off-heap byte length.
    final int indexInterval = in.readInt();
    final int summarySize = in.readInt();
    final long offheapSize = in.readLong();
    // Allocate the off-heap region and fill it directly from the stream.
    final Memory memory = Memory.allocate(offheapSize);
    FBUtilities.copy(in, new MemoryOutputStream(memory), offheapSize);
    return new IndexSummary(partitioner, memory, summarySize, indexInterval);
}
示例3: deserialize
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
@SuppressWarnings("resource")
public IndexSummary deserialize(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel, int expectedMinIndexInterval, int maxIndexInterval) throws IOException
{
    // Reject summaries written under a different min_index_interval; the
    // caller is expected to rebuild from the primary index instead.
    final int minIndexInterval = in.readInt();
    if (minIndexInterval != expectedMinIndexInterval)
    {
        throw new IOException(String.format("Cannot read index summary because min_index_interval changed from %d to %d.",
                                            minIndexInterval, expectedMinIndexInterval));
    }
    final int offsetCount = in.readInt();
    final long offheapSize = in.readLong();
    // Older formats carry no sampling level; fall back to full sampling.
    final int samplingLevel;
    final int fullSamplingSummarySize;
    if (haveSamplingLevel)
    {
        samplingLevel = in.readInt();
        fullSamplingSummarySize = in.readInt();
    }
    else
    {
        samplingLevel = BASE_SAMPLING_LEVEL;
        fullSamplingSummarySize = offsetCount;
    }
    final int effectiveIndexInterval = (int) Math.ceil((BASE_SAMPLING_LEVEL / (double) samplingLevel) * minIndexInterval);
    if (effectiveIndexInterval > maxIndexInterval)
    {
        throw new IOException(String.format("Rebuilding index summary because the effective index interval (%d) is higher than" +
                                            " the current max index interval (%d)", effectiveIndexInterval, maxIndexInterval));
    }
    final Memory offsets = Memory.allocate(offsetCount * 4);
    final Memory entries = Memory.allocate(offheapSize - offsets.size());
    try
    {
        FBUtilities.copy(in, new MemoryOutputStream(offsets), offsets.size());
        FBUtilities.copy(in, new MemoryOutputStream(entries), entries.size());
    }
    catch (IOException ioe)
    {
        // Off-heap memory is not garbage collected: release both regions
        // before propagating, since no owner has been handed them yet.
        offsets.free();
        entries.free();
        throw ioe;
    }
    // On disk the offsets and entries form one contiguous structure, with each
    // offset measured from the start of that structure (so the first offset
    // equals the size of the offsets region). In memory the two regions are
    // split and entries are indexed from zero, so rebase every offset by
    // subtracting the offsets-region size.
    final long offsetsSize = offsets.size();
    for (long position = 0; position < offsetsSize; position += 4)
    {
        offsets.setInt(position, (int) (offsets.getInt(position) - offsetsSize));
    }
    return new IndexSummary(partitioner, offsets, offsetCount, entries, entries.size(), fullSamplingSummarySize, minIndexInterval, samplingLevel);
}
示例4: deserialize
import org.apache.cassandra.utils.FBUtilities; //导入方法依赖的package包/类
/**
 * Reads an IndexSummary previously written by serialize().
 *
 * @param in                       stream positioned at the start of a serialized summary
 * @param partitioner              partitioner used to interpret the summary keys
 * @param haveSamplingLevel        whether the on-disk format includes sampling-level fields
 * @param expectedMinIndexInterval the min_index_interval currently configured; a mismatch aborts the read
 * @param maxIndexInterval         upper bound on the effective index interval; exceeding it aborts the read
 * @return the deserialized IndexSummary, which takes ownership of the allocated off-heap memory
 * @throws IOException on stream failure, or when the stored intervals are incompatible with the
 *                     current configuration (caller rebuilds the summary from the primary index)
 */
public IndexSummary deserialize(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel, int expectedMinIndexInterval, int maxIndexInterval) throws IOException
{
    int minIndexInterval = in.readInt();
    if (minIndexInterval != expectedMinIndexInterval)
    {
        throw new IOException(String.format("Cannot read index summary because min_index_interval changed from %d to %d.",
                                            minIndexInterval, expectedMinIndexInterval));
    }
    int offsetCount = in.readInt();
    long offheapSize = in.readLong();
    int samplingLevel, fullSamplingSummarySize;
    if (haveSamplingLevel)
    {
        samplingLevel = in.readInt();
        fullSamplingSummarySize = in.readInt();
    }
    else
    {
        // Pre-sampling-level format: treat as fully sampled.
        samplingLevel = BASE_SAMPLING_LEVEL;
        fullSamplingSummarySize = offsetCount;
    }
    int effectiveIndexInterval = (int) Math.ceil((BASE_SAMPLING_LEVEL / (double) samplingLevel) * minIndexInterval);
    if (effectiveIndexInterval > maxIndexInterval)
    {
        throw new IOException(String.format("Rebuilding index summary because the effective index interval (%d) is higher than" +
                                            " the current max index interval (%d)", effectiveIndexInterval, maxIndexInterval));
    }
    Memory offsets = Memory.allocate(offsetCount * 4);
    Memory entries = Memory.allocate(offheapSize - offsets.size());
    // BUGFIX: off-heap Memory is not garbage collected, so if the stream fails
    // mid-copy both allocations would leak. Free them before rethrowing —
    // no owner has been handed them yet (matches the leak-safe variant of
    // this method elsewhere in the codebase).
    try
    {
        FBUtilities.copy(in, new MemoryOutputStream(offsets), offsets.size());
        FBUtilities.copy(in, new MemoryOutputStream(entries), entries.size());
    }
    catch (IOException ioe)
    {
        offsets.free();
        entries.free();
        throw ioe;
    }
    // our on-disk representation treats the offsets and the summary data as one contiguous structure,
    // in which the offsets are based from the start of the structure. i.e., if the offsets occupy
    // X bytes, the value of the first offset will be X. In memory we split the two regions up, so that
    // the summary values are indexed from zero, so we apply a correction to the offsets when de/serializing.
    // In this case subtracting X from each of the offsets.
    for (int i = 0 ; i < offsets.size() ; i += 4)
        offsets.setInt(i, (int) (offsets.getInt(i) - offsets.size()));
    return new IndexSummary(partitioner, offsets, offsetCount, entries, entries.size(), fullSamplingSummarySize, minIndexInterval, samplingLevel);
}