本文整理汇总了Java中org.apache.flink.core.memory.MemorySegment.getShort方法的典型用法代码示例。如果您正苦于以下问题:Java MemorySegment.getShort方法的具体用法?Java MemorySegment.getShort怎么用?Java MemorySegment.getShort使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.core.memory.MemorySegment
的用法示例。
在下文中一共展示了MemorySegment.getShort方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: buildBloomFilterForBucket
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Treats the bucket memory after the bucket header as the bit set of the bloom filter
 * and feeds it with the hash codes of all records stored in this bucket, then continues
 * with any overflow segments chained to the bucket.
 */
final void buildBloomFilterForBucket(int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) {
	final int numRecords = bucket.getShort(bucketInSegmentPos + HEADER_COUNT_OFFSET);
	if (numRecords <= 0) {
		return;
	}
	final int dataStart = bucketInSegmentPos + BUCKET_HEADER_LENGTH;
	// The hash codes and the bloom filter occupy the same bytes, so snapshot all hash
	// codes before the filter starts overwriting them.
	final int[] codes = new int[numRecords];
	for (int idx = 0; idx < numRecords; idx++) {
		codes[idx] = bucket.getInt(dataStart + idx * HASH_CODE_LEN);
	}
	this.bloomFilter.setBitsLocation(bucket, dataStart);
	for (int idx = 0; idx < numRecords; idx++) {
		this.bloomFilter.addHash(codes[idx]);
	}
	buildBloomFilterForExtraOverflowSegments(bucketInSegmentPos, bucket, p);
}
示例2: buildBloomFilterForExtraOverflowSegments
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Walks the chain of overflow buckets hanging off the given bucket and adds every stored
 * hash code to the bloom filter. If the chain looks inconsistent, or the total number of
 * entries grows too large for the filter to be useful, the bucket is left unmarked and
 * the filter stays disabled for it.
 */
private void buildBloomFilterForExtraOverflowSegments(int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) {
	int entriesSeen = 0;
	long nextPointer = bucket.getLong(bucketInSegmentPos + HEADER_FORWARD_OFFSET);
	while (nextPointer != BUCKET_FORWARD_POINTER_NOT_SET) {
		// upper 32 bits: overflow segment index; lower 32 bits: offset within it
		final int segmentIndex = (int) (nextPointer >>> 32);
		if (segmentIndex < 0 || segmentIndex >= p.numOverflowSegments) {
			// broken forward pointer: bail out without marking the bucket
			return;
		}
		final MemorySegment overflowSegment = p.overflowSegments[segmentIndex];
		final int overflowOffset = (int) nextPointer;
		final int entries = overflowSegment.getShort(overflowOffset + HEADER_COUNT_OFFSET);
		entriesSeen += entries;
		// The per-bucket bit set is only 112 * 8 bits; past roughly 2048 entries the
		// false positive rate climbs above 0.9 and the filter becomes pure overhead.
		if (entriesSeen > 2048) {
			return;
		}
		for (int idx = 0; idx < entries; idx++) {
			this.bloomFilter.addHash(
				overflowSegment.getInt(overflowOffset + BUCKET_HEADER_LENGTH + idx * HASH_CODE_LEN));
		}
		nextPointer = overflowSegment.getLong(overflowOffset + HEADER_FORWARD_OFFSET);
	}
	// whole chain processed successfully: flag the bucket as covered by the filter
	bucket.put(bucketInSegmentPos + HEADER_STATUS_OFFSET, BUCKET_STATUS_IN_FILTER);
}
示例3: set
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Positions this probe state on the given bucket: remembers the original bucket so the
 * scan can be restarted, reads the record count from the bucket header, and resets the
 * cursor to the first entry behind the header.
 */
void set(MemorySegment bucket, MemorySegment[] overflowSegments, HashPartition<BT, PT> partition,
		int searchHashCode, int bucketInSegmentOffset)
{
	// remember the starting bucket (current position and restart anchor are the same)
	this.bucket = bucket;
	this.originalBucket = bucket;
	this.bucketInSegmentOffset = bucketInSegmentOffset;
	this.originalBucketInSegmentOffset = bucketInSegmentOffset;

	this.overflowSegments = overflowSegments;
	this.partition = partition;
	this.searchHashCode = searchHashCode;

	// start scanning at the first entry behind the bucket header
	this.posInSegment = this.bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
	this.countInSegment = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
	this.numInSegment = 0;
}
示例4: setBucket
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Points this iterator at the given bucket: caches the bucket segment and its overflow
 * chain, reads the entry count from the header, and rebinds the probed-flags bit set to
 * this bucket's probed-flags region.
 */
private void setBucket(MemorySegment bucket, MemorySegment[] overflowSegments, HashPartition<BT, PT> partition,
		int bucketInSegmentOffset) {
	this.bucketSegment = bucket;
	this.overflowSegments = overflowSegments;
	this.partition = partition;
	this.bucketInSegmentOffset = bucketInSegmentOffset;

	this.numInSegment = 0;
	this.countInSegment = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);

	// rebind probedSet to the probed-flags region of this bucket
	this.probedSet.setMemorySegment(bucketSegment, this.bucketInSegmentOffset + HEADER_PROBED_FLAGS_OFFSET);
}
示例5: nextSegment
import org.apache.flink.core.memory.MemorySegment; //导入方法依赖的package包/类
/**
 * Gets the next segment from the asynchronous block reader. If more read requests are
 * still to be issued, the current segment is first recycled into a new request; once no
 * requests remain, the segment simply lands in the reader's return queue, which thereby
 * collects all memory segments. The method then fetches the next non-consumed segment
 * returned by the reader and validates its block header. If no further segments are
 * available, an {@link EOFException} is thrown.
 *
 * @param current The memory segment used for the next request.
 * @return The memory segment to read from next.
 *
 * @throws EOFException Thrown, if no further segments are available.
 * @throws IOException Thrown, if an I/O error occurred while reading
 * @see AbstractPagedInputView#nextSegment(org.apache.flink.core.memory.MemorySegment)
 */
@Override
protected MemorySegment nextSegment(MemorySegment current) throws IOException {
	if (this.inLastBlock) {
		// the previously returned block was flagged as the last one
		throw new EOFException();
	}

	// Recycle the consumed segment into a new read request first; with only a single
	// segment in play, this very segment is the one obtained right below.
	if (current != null) {
		sendReadRequest(current);
	}

	final MemorySegment next = this.reader.getNextReturnedBlock();

	// validate the header written by the ChannelWriterOutputView
	if (next.getShort(0) != ChannelWriterOutputView.HEADER_MAGIC_NUMBER) {
		throw new IOException("The current block does not belong to a ChannelWriterOutputView / " +
			"ChannelReaderInputView: Wrong magic number.");
	}

	final short flags = next.getShort(ChannelWriterOutputView.HEADER_FLAGS_OFFSET);
	if ((flags & ChannelWriterOutputView.FLAG_LAST_BLOCK) != 0) {
		// remember that no further blocks follow this one
		this.numRequestsRemaining = 0;
		this.inLastBlock = true;
	}
	return next;
}