

Java MemorySegment.get Method Code Examples

This article collects typical usage examples of the org.apache.flink.core.memory.MemorySegment.get method in Java. If you are wondering what MemorySegment.get does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of org.apache.flink.core.memory.MemorySegment, the class this method belongs to.


The following presents 12 code examples of the MemorySegment.get method, sorted by popularity by default.
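Before the examples, here is a minimal sketch of the most common MemorySegment.get overloads: reading a single byte at an absolute offset, bulk-copying into a byte[], and copying into a ByteBuffer. It assumes a heap-backed segment created via MemorySegmentFactory.wrap and is meant purely as an illustration of the API, not as code taken from any of the projects below.

import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class MemorySegmentGetSketch {
	public static void main(String[] args) {
		// wrap an existing byte[] as a heap-backed MemorySegment (illustrative setup)
		MemorySegment segment = MemorySegmentFactory.wrap(new byte[] {10, 20, 30, 40, 50});

		// 1) read a single byte at an absolute offset
		byte single = segment.get(2);                 // 30

		// 2) bulk-copy a range into a byte[]
		byte[] dst = new byte[3];
		segment.get(1, dst, 0, 3);                    // dst = {20, 30, 40}

		// 3) copy into a ByteBuffer, starting at the buffer's current position
		ByteBuffer target = ByteBuffer.allocate(4);
		segment.get(0, target, 4);                    // target now holds {10, 20, 30, 40}

		System.out.println(single + " / " + Arrays.toString(dst));
	}
}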

Example 1: insert

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
@Override
public final void insert(T record) throws IOException {
	if (this.closed) {
		return;
	}

	final int hashCode = MathUtils.jenkinsHash(this.buildSideComparator.hash(record));
	final int posHashCode = hashCode % this.numBuckets;
	
	// get the bucket for the given hash code
	final int bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
	final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	final MemorySegment bucket = this.buckets[bucketArrayPos];
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
	InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
	
	long pointer = insertRecordIntoPartition(record, partition, false);
	insertBucketEntryFromStart(bucket, bucketInSegmentPos, hashCode, pointer, partitionNumber);
}
 
Developer: axbaretto, Project: flink, Lines: 22, Source: CompactingHashTable.java
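Most of the hash table examples on this page repeat the same addressing arithmetic before calling MemorySegment.get: the bucket number is split into a segment index and a byte offset inside that segment. A small worked sketch with illustrative constants (128 buckets per segment and 128-byte buckets, i.e. bucketsPerSegmentBits = NUM_INTRA_BUCKET_BITS = 7; the real values are defined by the hash table itself):

// Illustrative constants only; the actual values come from the hash table configuration.
final int bucketsPerSegmentBits = 7;                               // 2^7 = 128 buckets per memory segment
final int bucketsPerSegmentMask = (1 << bucketsPerSegmentBits) - 1;
final int NUM_INTRA_BUCKET_BITS = 7;                               // each bucket occupies 2^7 = 128 bytes

int posHashCode = 300;                                             // hashCode % numBuckets, as in insert()
int bucketArrayPos     = posHashCode >>> bucketsPerSegmentBits;                           // 300 / 128 = 2  -> buckets[2]
int bucketInSegmentPos = (posHashCode & bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;  // (300 % 128) * 128 = 5632

// bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET) then reads the one-byte
// partition number from the bucket header at that byte offset.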

Example 2: getMatchesFor

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
public HashBucketIterator<BT, PT> getMatchesFor(PT record) throws IOException {
	final TypeComparator<PT> probeAccessors = this.probeSideComparator;
	final int hash = hash(probeAccessors.hash(record), this.currentRecursionDepth);
	final int posHashCode = hash % this.numBuckets;
	
	// get the bucket for the given hash code
	final int bucketArrayPos = posHashCode >> this.bucketsPerSegmentBits;
	final int bucketInSegmentOffset = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	final MemorySegment bucket = this.buckets[bucketArrayPos];
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
	final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
	
	// for an in-memory partition, set the return iterators; else spill the probe records
	if (p.isInMemory()) {
		this.recordComparator.setReference(record);
		this.bucketIterator.set(bucket, p.overflowSegments, p, hash, bucketInSegmentOffset);
		return this.bucketIterator;
	}
	else {
		throw new IllegalStateException("Method is not applicable to partially spilled hash tables.");
	}
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: MutableHashTable.java

Example 3: buildBloomFilterForBucketsInPartition

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
final protected void buildBloomFilterForBucketsInPartition(int partNum, HashPartition<BT, PT> partition) {
	// Find all the buckets which belong to this partition, and build a bloom filter for each bucket (including its overflow buckets).
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;

	int numSegs = this.buckets.length;
	// go over all segments that are part of the table
	for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
		final MemorySegment segment = this.buckets[i];
		// go over all buckets in the segment
		for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
			final int bucketInSegmentOffset = k * HASH_BUCKET_SIZE;
			byte partitionNumber = segment.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
			if (partitionNumber == partNum) {
				byte status = segment.get(bucketInSegmentOffset + HEADER_STATUS_OFFSET);
				if (status == BUCKET_STATUS_IN_MEMORY) {
					buildBloomFilterForBucket(bucketInSegmentOffset, segment, partition);
				}
			}
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 22, Source: MutableHashTable.java

Example 4: moveToNextBucket

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
/**
 * Moves to the next bucket. Returns true if the new bucket is an in-memory (on-heap) bucket,
 * and false if it is a spilled bucket or there are no more buckets.
 */
private boolean moveToNextBucket() {
	scanCount++;
	if (scanCount > totalBucketNumber - 1) {
		return false;
	}
	// move to next bucket, update all the current bucket status with new bucket information.
	final int bucketArrayPos = scanCount >> this.bucketsPerSegmentBits;
	final int currentBucketInSegmentOffset = (scanCount & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	MemorySegment currentBucket = this.buckets[bucketArrayPos];
	final int partitionNumber = currentBucket.get(currentBucketInSegmentOffset + HEADER_PARTITION_OFFSET);
	final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
	if (p.isInMemory()) {
		setBucket(currentBucket, p.overflowSegments, p, currentBucketInSegmentOffset);
		return true;
	} else {
		return false;
	}
}
 
Developer: axbaretto, Project: flink, Lines: 23, Source: MutableHashTable.java

Example 5: validateBuffer

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
private static void validateBuffer(BufferOrEvent boe, int expectedSize, int expectedChannelIndex) {
	assertEquals("wrong channel index", expectedChannelIndex, boe.getChannelIndex());
	assertTrue("is not buffer", boe.isBuffer());

	Buffer buf = boe.getBuffer();
	assertEquals("wrong buffer size", expectedSize, buf.getSize());

	MemorySegment seg = buf.getMemorySegment();
	for (int i = 0; i < expectedSize; i++) {
		byte expected = (byte) i;
		if (expected != seg.get(i)) {
			fail(String.format(
					"wrong buffer contents at position %s : expected=%d , found=%d", i, expected, seg.get(i)));
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 17, Source: BufferSpillerTest.java

Example 6: insertIntoTable

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
protected final void insertIntoTable(final BT record, final int hashCode) throws IOException {
	final int posHashCode = hashCode % this.numBuckets;
	
	// get the bucket for the given hash code
	final int bucketArrayPos = posHashCode >> this.bucketsPerSegmentBits;
	final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	final MemorySegment bucket = this.buckets[bucketArrayPos];
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
	
	// get the partition descriptor for the bucket
	if (partitionNumber < 0 || partitionNumber >= this.partitionsBeingBuilt.size()) {
		throw new RuntimeException("Error: Hash structures in Hash-Join are corrupt. Invalid partition number for bucket.");
	}
	final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
	
	// --------- Step 1: Get the partition for this pair and put the pair into the buffer ---------
	
	long pointer = p.insertIntoBuildBuffer(record);
	if (pointer != -1) {
		// record was inserted into an in-memory partition. a pointer must be inserted into the buckets
		insertBucketEntry(p, bucket, bucketInSegmentPos, hashCode, pointer, true);
	} else {
		byte status = bucket.get(bucketInSegmentPos + HEADER_STATUS_OFFSET);
		if (status == BUCKET_STATUS_IN_FILTER) {
			// The partition has been spilled, so relocate the bloom filter bits for the current
			// bucket and add the hash code to the bloom filter.
			this.bloomFilter.setBitsLocation(bucket, bucketInSegmentPos + BUCKET_HEADER_LENGTH);
			this.bloomFilter.addHash(hashCode);
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 34, Source: MutableHashTable.java

Example 7: addNextChunkFromMemorySegment

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
private void addNextChunkFromMemorySegment(MemorySegment segment, int numBytesInSegment) throws IOException {
	int segmentPosition = 0;

	// check where to go. if we have a partial length, we need to complete it first
	if (this.lengthBuffer.position() > 0) {
		int toPut = Math.min(this.lengthBuffer.remaining(), numBytesInSegment);
		segment.get(0, this.lengthBuffer, toPut);

		// did we complete the length?
		if (this.lengthBuffer.hasRemaining()) {
			return;
		} else {
			this.recordLength = this.lengthBuffer.getInt(0);

			this.lengthBuffer.clear();
			segmentPosition = toPut;
		}
	}

	// copy as much as we need or can for this next spanning record
	int needed = this.recordLength - this.recordLimit;
	int available = numBytesInSegment - segmentPosition;
	int toCopy = Math.min(needed, available);

	segment.get(this.serializationBuffer, segmentPosition, toCopy);
	this.recordLimit += toCopy;

	if (toCopy < available) {
		// there is more data in the segment
		this.leftOverData = segment;
		this.leftOverStart = segmentPosition + toCopy;
		this.leftOverLimit = numBytesInSegment;
	}

	// update read view
	this.serializationReadBuffer.setBuffer(this.serializationBuffer.wrapAsByteBuffer());
}
 
Developer: axbaretto, Project: flink, Lines: 38, Source: AdaptiveSpanningRecordDeserializer.java
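A side note on the ByteBuffer overload used at the top of this example: segment.get(0, this.lengthBuffer, toPut) copies toPut bytes from the start of the segment into lengthBuffer at its current position and advances that position, which is what lets the 4-byte length field be assembled across segment boundaries. A minimal sketch of that idea with made-up data (the local names here are illustrative, not the deserializer's actual fields):

// Minimal sketch: a 4-byte record length split across two MemorySegments (2 bytes + 2 bytes).
ByteBuffer lengthBuffer = ByteBuffer.allocate(4);

MemorySegment first  = MemorySegmentFactory.wrap(new byte[] {0, 0});       // first half of the length
MemorySegment second = MemorySegmentFactory.wrap(new byte[] {1, 0, 99});   // second half plus one payload byte

first.get(0, lengthBuffer, 2);               // lengthBuffer.position() == 2, length still incomplete
second.get(0, lengthBuffer, 2);              // lengthBuffer.position() == 4, length complete

int recordLength = lengthBuffer.getInt(0);   // big-endian 0x00000100 = 256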

Example 8: compactPartition

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
/**
 * Compacts (garbage collects) a partition using a copy-compact strategy with the dedicated compaction partition.
 * 
 * @param partitionNumber partition to compact
 * @throws IOException 
 */
private void compactPartition(final int partitionNumber) throws IOException {
	// do nothing if table was closed, parameter is invalid or no garbage exists
	if (this.closed || partitionNumber >= this.partitions.size() || this.partitions.get(partitionNumber).isCompacted()) {
		return;
	}
	// release all segments owned by compaction partition
	this.compactionMemory.clearAllMemory(availableMemory);
	this.compactionMemory.allocateSegments(1);
	this.compactionMemory.pushDownPages();
	T tempHolder = this.buildSideSerializer.createInstance();
	final int numPartitions = this.partitions.size();
	InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
	MemorySegment[] overflowSegments = partition.overflowSegments;
	long pointer;
	int pointerOffset;
	int bucketOffset;
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	for (int i = 0, bucket = partitionNumber; i < this.buckets.length && bucket < this.numBuckets; i++) {
		MemorySegment segment = this.buckets[i];
		// go over all buckets in the segment belonging to the partition
		for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < this.numBuckets; k += numPartitions, bucket += numPartitions) {
			bucketOffset = k * HASH_BUCKET_SIZE;
			if((int)segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
				throw new IOException("Accessed wrong bucket! wanted: " + partitionNumber + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
			}
			// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
			int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
			int numInSegment = 0;
			pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
			while (true) {
				while (numInSegment < countInSegment) {
					pointer = segment.getLong(pointerOffset);
					tempHolder = partition.readRecordAt(pointer, tempHolder);
					pointer = this.compactionMemory.appendRecord(tempHolder);
					segment.putLong(pointerOffset, pointer);
					pointerOffset += POINTER_LEN;
					numInSegment++;
				}
				// this segment is done. check if there is another chained bucket
				final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
				if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
					break;
				}
				final int overflowSegNum = (int) (forwardPointer >>> 32);
				segment = overflowSegments[overflowSegNum];
				bucketOffset = (int) forwardPointer;
				countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
				pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
				numInSegment = 0;
			}
			segment = this.buckets[i];
		}
	}
	// swap partition with compaction partition
	this.compactionMemory.setPartitionNumber(partitionNumber);
	this.partitions.add(partitionNumber, compactionMemory);
	this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
	this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
	this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
	this.partitions.get(partitionNumber).setIsCompacted(true);
	//this.partitions.get(partitionNumber).pushDownPages();
	this.compactionMemory = partition;
	this.compactionMemory.resetRecordCounter();
	this.compactionMemory.setPartitionNumber(-1);
	this.compactionMemory.overflowSegments = null;
	this.compactionMemory.numOverflowSegments = 0;
	this.compactionMemory.nextOverflowBucket = 0;
	// try to allocate maximum segment count
	this.compactionMemory.clearAllMemory(this.availableMemory);
	int maxSegmentNumber = this.getMaxPartition();
	this.compactionMemory.allocateSegments(maxSegmentNumber);
	this.compactionMemory.resetRWViews();
	this.compactionMemory.pushDownPages();
}
 
Developer: axbaretto, Project: flink, Lines: 81, Source: CompactingHashTable.java
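Examples 8, 9 and 10 all decode the chained-bucket forward pointer the same way: the upper 32 bits select the overflow segment and the lower 32 bits are the byte offset of the overflow bucket inside that segment. A tiny illustration with made-up values:

// Illustration only: encoding and decoding of a bucket forward pointer
// (overflow segment number in the upper 32 bits, bucket offset in the lower 32 bits).
int overflowSegNum = 3;
int overflowBucketOffset = 512;

long forwardPointer = (((long) overflowSegNum) << 32) | (overflowBucketOffset & 0xFFFFFFFFL);

int decodedSegNum = (int) (forwardPointer >>> 32);   // 3
int decodedOffset = (int) forwardPointer;            // 512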

Example 9: fillCache

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
/**
 * Utility function that inserts all entries from a bucket and its overflow buckets into the cache.
 * 
 * @return true if last bucket was not reached yet
 * @throws IOException
 */
private boolean fillCache() throws IOException {
	if(currentBucketIndex >= table.numBuckets) {
		return false;
	}
	MemorySegment bucket = table.buckets[currentSegmentIndex];
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(currentBucketOffset + HEADER_PARTITION_OFFSET);
	final InMemoryPartition<T> partition = table.partitions.get(partitionNumber);
	final MemorySegment[] overflowSegments = partition.overflowSegments;
	
	int countInSegment = bucket.getInt(currentBucketOffset + HEADER_COUNT_OFFSET);
	int numInSegment = 0;
	int posInSegment = currentBucketOffset + BUCKET_POINTER_START_OFFSET;
	int bucketOffset = currentBucketOffset;

	// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
	while (true) {
		while (numInSegment < countInSegment) {
			long pointer = bucket.getLong(posInSegment);
			posInSegment += POINTER_LEN;
			numInSegment++;
			T target = table.buildSideSerializer.createInstance();
			try {
				target = partition.readRecordAt(pointer, target);
				cache.add(target);
			} catch (IOException e) {
					throw new RuntimeException("Error deserializing record from the Hash Table: " + e.getMessage(), e);
			}
		}
		// this segment is done. check if there is another chained bucket
		final long forwardPointer = bucket.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
		if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
			break;
		}
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		bucket = overflowSegments[overflowSegNum];
		bucketOffset = (int) forwardPointer;
		countInSegment = bucket.getInt(bucketOffset + HEADER_COUNT_OFFSET);
		posInSegment = bucketOffset + BUCKET_POINTER_START_OFFSET;
		numInSegment = 0;
	}
	currentBucketIndex++;
	if(currentBucketIndex % bucketsPerSegment == 0) {
		currentSegmentIndex++;
		currentBucketOffset = 0;
	} else {
		currentBucketOffset += HASH_BUCKET_SIZE;
	}
	return true;
}
 
Developer: axbaretto, Project: flink, Lines: 57, Source: CompactingHashTable.java

Example 10: getMatchFor

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
public T getMatchFor(PT probeSideRecord, T reuse) {
	if (closed) {
		return null;
	}
	final int searchHashCode = MathUtils.jenkinsHash(this.probeTypeComparator.hash(probeSideRecord));
	
	final int posHashCode = searchHashCode % numBuckets;
	
	// get the bucket for the given hash code
	MemorySegment bucket = buckets[posHashCode >> bucketsPerSegmentBits];
	int bucketInSegmentOffset = (posHashCode & bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
	final InMemoryPartition<T> p = partitions.get(partitionNumber);
	final MemorySegment[] overflowSegments = p.overflowSegments;
	
	this.pairComparator.setReference(probeSideRecord);
	
	int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
	int numInSegment = 0;
	int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;

	// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
	while (true) {
		
		while (numInSegment < countInSegment) {
			
			final int thisCode = bucket.getInt(posInSegment);
			posInSegment += HASH_CODE_LEN;
				
			// check if the hash code matches
			if (thisCode == searchHashCode) {
				// get the pointer to the pair
				final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (numInSegment * POINTER_LEN);
				final long pointer = bucket.getLong(pointerOffset);
				numInSegment++;
				
				// deserialize the key to check whether it is really equal, or whether we had only a hash collision
				try {
					reuse = p.readRecordAt(pointer, reuse);
					
					if (this.pairComparator.equalToReference(reuse)) {
						this.partition = p;
						this.bucket = bucket;
						this.pointerOffsetInBucket = pointerOffset;
						return reuse;
					}
				}
				catch (IOException e) {
					throw new RuntimeException("Error deserializing record from the hashtable: " + e.getMessage(), e);
				}
			}
			else {
				numInSegment++;
			}
		}
		
		// this segment is done. check if there is another chained bucket
		final long forwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
		if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
			return null;
		}
		
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		bucket = overflowSegments[overflowSegNum];
		bucketInSegmentOffset = (int) forwardPointer;
		countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
		posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
		numInSegment = 0;
	}
}
 
Developer: axbaretto, Project: flink, Lines: 73, Source: CompactingHashTable.java

Example 11: processProbeIter

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
protected boolean processProbeIter() throws IOException{
	final ProbeIterator<PT> probeIter = this.probeIterator;
	final TypeComparator<PT> probeAccessors = this.probeSideComparator;

	if (!this.probeMatchedPhase) {
		return false;
	}
	
	PT next;
	while ((next = probeIter.next()) != null) {
		final int hash = hash(probeAccessors.hash(next), this.currentRecursionDepth);
		final int posHashCode = hash % this.numBuckets;
		
		// get the bucket for the given hash code
		final int bucketArrayPos = posHashCode >> this.bucketsPerSegmentBits;
		final int bucketInSegmentOffset = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
		final MemorySegment bucket = this.buckets[bucketArrayPos];
		
		// get the basic characteristics of the bucket
		final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
		final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
		
		// for an in-memory partition, set the return iterators; else spill the probe records
		if (p.isInMemory()) {
			this.recordComparator.setReference(next);
			this.bucketIterator.set(bucket, p.overflowSegments, p, hash, bucketInSegmentOffset);
			return true;
		} else {
			byte status = bucket.get(bucketInSegmentOffset + HEADER_STATUS_OFFSET);
			if (status == BUCKET_STATUS_IN_FILTER) {
				this.bloomFilter.setBitsLocation(bucket, bucketInSegmentOffset + BUCKET_HEADER_LENGTH);
				// Use BloomFilter to filter out all the probe records which would not match any key in spilled build table buckets.
				if (this.bloomFilter.testHash(hash)) {
					p.insertIntoProbeBuffer(next);
				}
			} else {
				p.insertIntoProbeBuffer(next);
			}
		}
	}
	// -------------- partition done ---------------

	return false;
}
 
Developer: axbaretto, Project: flink, Lines: 45, Source: MutableHashTable.java

Example 12: addNextChunkFromMemorySegment

import org.apache.flink.core.memory.MemorySegment; // import the required package/class
private void addNextChunkFromMemorySegment(MemorySegment segment, int numBytesInSegment) throws IOException {
	int segmentPosition = 0;
	
	// check where to go. if we have a partial length, we need to complete it first
	if (this.lengthBuffer.position() > 0) {
		int toPut = Math.min(this.lengthBuffer.remaining(), numBytesInSegment);
		segment.get(0, this.lengthBuffer, toPut);
		
		// did we complete the length?
		if (this.lengthBuffer.hasRemaining()) {
			return;
		} else {
			this.recordLength = this.lengthBuffer.getInt(0);

			this.lengthBuffer.clear();
			segmentPosition = toPut;
			
			if (this.recordLength > THRESHOLD_FOR_SPILLING) {
				this.spillingChannel = createSpillingChannel();
			}
		}
	}

	// copy as much as we need or can for this next spanning record
	int needed = this.recordLength - this.accumulatedRecordBytes;
	int available = numBytesInSegment - segmentPosition;
	int toCopy = Math.min(needed, available);

	if (spillingChannel != null) {
		// spill to file
		ByteBuffer toWrite = segment.wrap(segmentPosition, toCopy);
		this.spillingChannel.write(toWrite);
	}
	else {
		ensureBufferCapacity(accumulatedRecordBytes + toCopy);
		segment.get(segmentPosition, buffer, this.accumulatedRecordBytes, toCopy);
	}
	
	this.accumulatedRecordBytes += toCopy;
	
	if (toCopy < available) {
		// there is more data in the segment
		this.leftOverData = segment;
		this.leftOverStart = segmentPosition + toCopy;
		this.leftOverLimit = numBytesInSegment;
	}
	
	if (accumulatedRecordBytes == recordLength) {
		// we have the full record
		if (spillingChannel == null) {
			this.serializationReadBuffer.setBuffer(buffer, 0, recordLength);
		}
		else {
			spillingChannel.close();

			BufferedInputStream inStream = new BufferedInputStream(new FileInputStream(spillFile), 2 * 1024 * 1024);
			this.spillFileReader = new DataInputViewStreamWrapper(inStream);
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 61, Source: SpillingAdaptiveSpanningRecordDeserializer.java


Note: The org.apache.flink.core.memory.MemorySegment.get method examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open source projects contributed by many developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.