This article collects typical usage examples of the Java method org.apache.flink.core.memory.MemorySegment.putShort. If you are wondering what MemorySegment.putShort does, how to call it, or what real uses of it look like, the hand-picked code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.core.memory.MemorySegment.
The following shows 3 code examples of the MemorySegment.putShort method, sorted by popularity by default.
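Before the examples, here is a minimal standalone sketch of what putShort does: it writes a 2-byte short value at an absolute byte offset inside the segment. Obtaining the segment via the static MemorySegmentFactory.wrap helper is an assumption made for illustration; the examples below instead work on segments handed out by Flink's memory manager.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class PutShortDemo {

    public static void main(String[] args) {
        // Wrap a plain byte[] as a MemorySegment (illustrative; production code
        // usually obtains its segments from Flink's MemoryManager).
        MemorySegment segment = MemorySegmentFactory.wrap(new byte[16]);

        // Write a 2-byte value at absolute byte offset 4 within the segment.
        segment.putShort(4, (short) 42);

        // Read it back from the same offset.
        short value = segment.getShort(4);
        System.out.println(value); // 42
    }
}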
Example 1: initTable
import org.apache.flink.core.memory.MemorySegment; // import the package/class required by the method
protected void initTable(int numBuckets, byte numPartitions) {
    final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
    final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ((numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
    final MemorySegment[] table = new MemorySegment[numSegs];
    ensureNumBuffersReturned(numSegs);

    // go over all segments that are part of the table
    for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
        final MemorySegment seg = getNextBuffer();

        // go over all buckets in the segment
        for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
            final int bucketOffset = k * HASH_BUCKET_SIZE;

            // compute the partition that the bucket corresponds to
            final byte partition = assignPartition(bucket, numPartitions);

            // initialize the header fields
            seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
            seg.put(bucketOffset + HEADER_STATUS_OFFSET, BUCKET_STATUS_IN_MEMORY);
            seg.putShort(bucketOffset + HEADER_COUNT_OFFSET, (short) 0);
            seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
            seg.putShort(bucketOffset + HEADER_PROBED_FLAGS_OFFSET, (short) 0);
        }
        table[i] = seg;
    }

    this.buckets = table;
    this.numBuckets = numBuckets;
    if (useBloomFilters) {
        initBloomFilter(numBuckets);
    }
}
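Each bucket written by initTable starts with a small fixed header: a partition byte, a status byte, a 2-byte record count, an 8-byte forward pointer to an overflow bucket, and a 2-byte probed-flags field; putShort is what stamps the two short-sized fields. As a rough illustration, a header written this way could be read back as sketched below. The concrete offset values are assumptions chosen for the sketch, not the table's authoritative layout.

import org.apache.flink.core.memory.MemorySegment;

final class BucketHeaderDump {

    // Offsets mirror the fields written by initTable(); the concrete values
    // are illustrative assumptions, not the real constants of the hash table.
    static final int HEADER_PARTITION_OFFSET = 0;      // 1 byte: owning partition
    static final int HEADER_STATUS_OFFSET = 1;         // 1 byte: in-memory / spilled status
    static final int HEADER_COUNT_OFFSET = 2;          // 2 bytes: records in this bucket
    static final int HEADER_FORWARD_OFFSET = 4;        // 8 bytes: pointer to an overflow bucket
    static final int HEADER_PROBED_FLAGS_OFFSET = 12;  // 2 bytes: probed-flags bitmap

    // Reads back the header of the bucket starting at bucketOffset.
    static void dumpBucketHeader(MemorySegment seg, int bucketOffset) {
        byte partition = seg.get(bucketOffset + HEADER_PARTITION_OFFSET);
        byte status = seg.get(bucketOffset + HEADER_STATUS_OFFSET);
        short count = seg.getShort(bucketOffset + HEADER_COUNT_OFFSET);         // 0 right after initTable()
        long forward = seg.getLong(bucketOffset + HEADER_FORWARD_OFFSET);       // "not set" sentinel
        short probed = seg.getShort(bucketOffset + HEADER_PROBED_FLAGS_OFFSET); // 0 right after initTable()
        System.out.printf("partition=%d status=%d count=%d forward=%d probed=%d%n",
                partition, status, count, forward, probed);
    }
}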
Example 2: writeSegment
import org.apache.flink.core.memory.MemorySegment; // import the package/class required by the method
private void writeSegment(MemorySegment segment, int writePosition, boolean lastSegment) throws IOException {
    segment.putShort(0, HEADER_MAGIC_NUMBER);
    segment.putShort(HEADER_FLAGS_OFFSET, lastSegment ? FLAG_LAST_BLOCK : 0);
    segment.putInt(HEAD_BLOCK_LENGTH_OFFSET, writePosition);

    this.writer.writeBlock(segment);
    this.bytesBeforeSegment += writePosition - HEADER_LENGTH;
}
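writeSegment stamps every outgoing I/O block with a short header before handing the segment to the block writer: a magic number at offset 0, a flags word that marks the last block of the channel, and the write position, which records where the payload ends. A reader-side sketch of parsing such a header is shown below; the constant values are placeholders assumed for illustration, while the real ones live in the output view class that owns writeSegment.

import java.io.IOException;
import org.apache.flink.core.memory.MemorySegment;

final class BlockHeaderReader {

    // Placeholder values assumed for illustration only.
    static final short HEADER_MAGIC_NUMBER = (short) 0xC0DE;
    static final int HEADER_FLAGS_OFFSET = 2;
    static final int HEAD_BLOCK_LENGTH_OFFSET = 4;
    static final short FLAG_LAST_BLOCK = (short) 1;

    // Parses the header written by writeSegment() and returns the payload end offset.
    static int readBlockHeader(MemorySegment segment) throws IOException {
        if (segment.getShort(0) != HEADER_MAGIC_NUMBER) {
            throw new IOException("Block does not start with the expected magic number.");
        }
        boolean lastBlock = (segment.getShort(HEADER_FLAGS_OFFSET) & FLAG_LAST_BLOCK) != 0;
        int writePosition = segment.getInt(HEAD_BLOCK_LENGTH_OFFSET);
        System.out.println("lastBlock=" + lastBlock + ", payload ends at offset " + writePosition);
        return writePosition;
    }
}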
Example 3: testBucketsNotFulfillSegment
import org.apache.flink.core.memory.MemorySegment; // import the package/class required by the method
@Test
public void testBucketsNotFulfillSegment() throws Exception {
    final int NUM_KEYS = 10000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        // 33 is the minimum number of pages required to perform the hash join on these inputs
        memSegments = this.memManager.allocatePages(MEM_OWNER, 33);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // For FLINK-2545: the bucket data may not fill its whole buffer. For example, a buffer may hold 256 buckets
    // while the hash table assigns only 250 buckets to it. The unused bytes at the end of the buffer may contain
    // arbitrary data, which can corrupt the hash table if it forgets to skip them. To mock this, put invalid
    // bucket data (partition byte = 0, status byte = 0, count = -1, forward pointer = ~0x0L) at the end of each buffer.
    for (MemorySegment segment : memSegments) {
        int newBucketOffset = segment.size() - 128;
        // write the invalid header fields
        segment.put(newBucketOffset + 0, (byte) 0);
        segment.put(newBucketOffset + 1, (byte) 0);
        segment.putShort(newBucketOffset + 2, (short) -1);
        segment.putLong(newBucketOffset + 4, ~0x0L);
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
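With NUM_KEYS = 10000, BUILD_VALS_PER_KEY = 3 and PROBE_VALS_PER_KEY = 10, every probe record matches the 3 build records that share its key, so the assertion expects 10000 * 3 * 10 = 300,000 joined records; the poisoned pseudo-bucket written into the unused tail of each buffer must not change that count.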