本文整理汇总了Java中org.apache.flink.core.memory.MemorySegment.size方法的典型用法代码示例。如果您正苦于以下问题:Java MemorySegment.size方法的具体用法?Java MemorySegment.size怎么用?Java MemorySegment.size使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.core.memory.MemorySegment
的用法示例。
在下文中一共展示了MemorySegment.size方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: BuildSideBuffer
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Creates a build-side buffer that starts writing into {@code initialSegment} and pulls
 * additional segments from {@code memSource} as it fills up.
 *
 * @param initialSegment the first segment to write into; its size must be a power of two
 *                       (enforced by {@code MathUtils.log2strict})
 * @param memSource the source that supplies further memory segments on demand
 */
private BuildSideBuffer(MemorySegment initialSegment, MemorySegmentSource memSource) {
    super(initialSegment, initialSegment.size(), 0);
    this.memSource = memSource;
    // segment size is a power of two, so shifts by sizeBits replace division
    this.sizeBits = MathUtils.log2strict(initialSegment.size());
    this.targetList = new ArrayList<>();
}
示例2: ChannelWriterOutputView
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Creates a new ChannelWriterOutputView that writes to the given channel, buffering data in
 * the supplied memory segments. If {@code memory} is null, the view draws its buffers
 * directly from the writer's return queue instead — note that this variant blocks whenever
 * the return queue is empty.
 *
 * @param writer The writer to write to. Must not be null.
 * @param memory The memory used to buffer data, or null to rely solely on the return queue.
 * @param segmentSize The size every supplied memory segment must have.
 */
public ChannelWriterOutputView(BlockChannelWriter<MemorySegment> writer, List<MemorySegment> memory, int segmentSize) {
    super(segmentSize, HEADER_LENGTH);

    if (writer == null) {
        throw new NullPointerException();
    }
    this.writer = writer;

    if (memory == null) {
        this.numSegments = 0;
    } else {
        this.numSegments = memory.size();

        // seed the writer's return queue with the supplied segments (back to front,
        // matching the original insertion order)
        final LinkedBlockingQueue<MemorySegment> returnQueue = writer.getReturnQueue();
        for (int idx = memory.size(); idx > 0; idx--) {
            final MemorySegment segment = memory.get(idx - 1);
            if (segment.size() != segmentSize) {
                throw new IllegalArgumentException("The supplied memory segments are not of the specified size.");
            }
            returnQueue.add(segment);
        }
    }

    // fetch the first segment to write into
    try {
        advance();
    }
    catch (IOException ioex) {
        throw new RuntimeException("BUG: IOException occurred while getting first block for ChannelWriterOutputView.", ioex);
    }
}
示例3: onBuffer
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Verifies that the received buffer carries the expected consecutive integer sequence
 * (one int per 4 bytes, continuing across buffers) before delegating to the superclass.
 */
@Override
public void onBuffer(Buffer buffer) {
    final MemorySegment seg = buffer.getMemorySegment();
    final int intsPerSegment = seg.size() / 4;
    // each previously read buffer advanced the sequence by a full segment of ints
    int expectedValue = getNumberOfReadBuffers() * intsPerSegment;
    for (int offset = 0; offset < seg.size(); offset += 4) {
        assertEquals(expectedValue++, seg.getInt(offset));
    }
    super.onBuffer(buffer);
}
示例4: channelReadWriteOneSegment
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Round-trips NUM_IOS blocks through a block channel using a single recycled memory
 * segment: each written block is filled with its block index, then read back and checked.
 */
@Test
public void channelReadWriteOneSegment() {
    final int NUM_IOS = 1111;
    try {
        final FileIOChannel.ID channelID = this.ioManager.createChannel();

        // write phase: stamp every int of the segment with the block index, then
        // hand the segment to the writer and reclaim it from the return queue
        final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channelID);
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(32 * 1024);
        for (int block = 0; block < NUM_IOS; block++) {
            for (int offset = 0; offset < segment.size(); offset += 4) {
                segment.putInt(offset, block);
            }
            writer.writeBlock(segment);
            segment = writer.getNextReturnedBlock();
        }
        writer.close();

        // read phase: every int of every block must equal that block's index
        final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channelID);
        for (int block = 0; block < NUM_IOS; block++) {
            reader.readBlock(segment);
            segment = reader.getNextReturnedBlock();
            for (int offset = 0; offset < segment.size(); offset += 4) {
                if (segment.getInt(offset) != block) {
                    fail("Read memory segment contains invalid data.");
                }
            }
        }
        reader.closeAndDelete();
    }
    catch (Exception ex) {
        ex.printStackTrace();
        fail("Test encountered an exception: " + ex.getMessage());
    }
}
示例5: getLimitForSegment
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Returns how many bytes of the given segment are valid: full segments while blocks
 * remain, otherwise the (possibly shorter) size of the last block.
 */
@Override
protected int getLimitForSegment(MemorySegment segment) {
    if (numBlocksRemaining > 0) {
        return segment.size();
    }
    return sizeOfLastBlock;
}
示例6: getLimitForSegment
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Returns the number of valid bytes in the given segment: the full segment size while
 * blocks remain, otherwise the byte count of the final (possibly partial) block.
 */
@Override
protected int getLimitForSegment(MemorySegment segment) {
    if (this.numBlocksRemaining > 0) {
        return segment.size();
    }
    return this.lastBlockBytes;
}
示例7: testBucketsNotFulfillSegment
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Regression test for FLINK-2545: the tail of a bucket segment may hold stale bytes that
 * the hash table must skip. This test poisons the last 128 bytes of every segment with an
 * invalid bucket header before building the table, then verifies the join still produces
 * the exact expected number of result records.
 */
@Test
public void testBucketsNotFulfillSegment() throws Exception {
final int NUM_KEYS = 10000;
final int BUILD_VALS_PER_KEY = 3;
final int PROBE_VALS_PER_KEY = 10;
// create a build input that gives 30000 pairs with 3 values sharing the same key
MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);
// create a probe input that gives 100000 pairs with 10 values sharing a key
MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);
// allocate the memory for the HashTable
List<MemorySegment> memSegments;
try {
// 33 is minimum number of pages required to perform hash join this inputs
memSegments = this.memManager.allocatePages(MEM_OWNER, 33);
}
catch (MemoryAllocationException maex) {
fail("Memory for the Join could not be provided.");
return;
}
// For FLINK-2545, the buckets data may not fulfill it's buffer, for example, the buffer may contains 256 buckets,
// while hash table only assign 250 bucket on it. The unused buffer bytes may contains arbitrary data, which may
// influence hash table if forget to skip it. To mock this, put the invalid bucket data(partition=1, inMemory=true, count=-1)
// at the end of buffer.
for (MemorySegment segment : memSegments) {
int newBucketOffset = segment.size() - 128;
// initialize the header fields
// byte 0: partition number, byte 1: status — both zeroed here
segment.put(newBucketOffset + 0, (byte)0);
segment.put(newBucketOffset + 1, (byte)0);
// short at offset 2: element count of -1 (the invalid marker this test relies on)
segment.putShort(newBucketOffset + 2, (short) -1);
// long at offset 4: all bits set — garbage pointer/hash data for the fake bucket
segment.putLong(newBucketOffset + 4, ~0x0L);
}
// ----------------------------------------------------------------------------------------
final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
memSegments, ioManager);
join.open(buildInput, probeInput);
final IntPair recordReuse = new IntPair();
int numRecordsInJoinResult = 0;
// drain the join: every probe match yields one record from the build-side iterator
while (join.nextRecord()) {
MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
while (buildSide.next(recordReuse) != null) {
numRecordsInJoinResult++;
}
}
// 10000 keys x 3 build values x 10 probe values = 300000 expected records
Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);
join.close();
this.memManager.release(join.getFreedMemory());
}
示例8: NetworkBuffer
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Creates a new buffer instance backed by the given <tt>memorySegment</tt>, with both the
 * <tt>readerIndex</tt> and <tt>writerIndex</tt> starting at <tt>0</tt>.
 *
 * @param memorySegment
 * 		backing memory segment (defines {@link #maxCapacity})
 * @param recycler
 * 		will be called to recycle this buffer once the reference count is <tt>0</tt>
 * @param isBuffer
 * 		whether this buffer represents a buffer (<tt>true</tt>) or an event (<tt>false</tt>)
 * @param size
 * 		current size of data in the buffer, i.e. the writer index to set
 */
public NetworkBuffer(MemorySegment memorySegment, BufferRecycler recycler, boolean isBuffer, int size) {
    // max capacity is fixed by the backing segment
    super(memorySegment.size());
    this.memorySegment = checkNotNull(memorySegment);
    this.recycler = checkNotNull(recycler);
    this.isBuffer = isBuffer;
    // start out "full" relative to the segment, then position the writer index
    this.currentSize = memorySegment.size();
    setSize(size);
}