

Java MemorySegment.getLong Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.core.memory.MemorySegment.getLong. If you have been wondering what MemorySegment.getLong does and how to use it, the hand-picked examples below should help. You can also browse further usage examples of the enclosing class, org.apache.flink.core.memory.MemorySegment.


The following presents 9 code examples of the MemorySegment.getLong method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
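
Before diving into the examples, here is a minimal, self-contained sketch of what MemorySegment.getLong does: it reads an 8-byte long starting at the given byte offset of a Flink-managed memory segment. The sketch allocates a heap-backed segment via MemorySegmentFactory; the segment size and offsets are illustrative assumptions, not values used by the examples below.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class GetLongDemo {
	public static void main(String[] args) {
		// Allocate a small unpooled, heap-backed segment (64 bytes chosen arbitrarily).
		MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(64);

		// Write a long at byte offset 8, then read it back with getLong.
		segment.putLong(8, 42L);
		long value = segment.getLong(8); // reads 8 bytes in the platform's native byte order

		System.out.println(value); // prints 42
	}
}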

Example 1: insert

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
/**
 * Inserts the given record into the hash table.
 * Note: this method doesn't care about whether a record with the same key is already present.
 * @param record The record to insert.
 * @throws IOException (EOFException specifically, if memory ran out)
 */
@Override
public void insert(T record) throws IOException {
	if (closed) {
		return;
	}

	final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
	final int bucket = hashCode & numBucketsMask;
	final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
	final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
	final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
	final long firstPointer = bucketSegment.getLong(bucketOffset);

	try {
		final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record);
		bucketSegment.putLong(bucketOffset, newFirstPointer);
	} catch (EOFException ex) {
		compactOrThrow();
		insert(record);
		return;
	}

	numElements++;
	resizeTableIfNecessary();
}
 
Developer: axbaretto, Project: flink, Lines of code: 32, Source: InPlaceMutableHashTable.java
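
Example 1's bucket addressing assumes that both the number of buckets per segment and the bucket size are powers of two, so division and modulo reduce to shifts and masks. Below is a hedged sketch of the same arithmetic with illustrative constants (the names mirror the fields above, but the concrete values are assumptions):

// Assumed demo values: 16 buckets per segment, 128-byte buckets.
final int numBucketsPerSegmentBits = 4;                                   // log2(16)
final int numBucketsPerSegmentMask = (1 << numBucketsPerSegmentBits) - 1; // 0x0F
final int bucketSizeBits = 7;                                             // log2(128)

int bucket = 37;                                                          // a global bucket index
int segmentIndex = bucket >>> numBucketsPerSegmentBits;                   // 37 / 16 = 2
int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // (37 % 16) * 128 = 640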

Example 2: compare

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
@Override
public int compare(int segmentNumberI, int segmentOffsetI, int segmentNumberJ, int segmentOffsetJ) {
	final MemorySegment segI = this.sortIndex.get(segmentNumberI);
	final MemorySegment segJ = this.sortIndex.get(segmentNumberJ);

	int val = segI.compare(segJ, segmentOffsetI + OFFSET_LEN, segmentOffsetJ + OFFSET_LEN, this.numKeyBytes);

	if (val != 0 || this.normalizedKeyFullyDetermines) {
		return this.useNormKeyUninverted ? val : -val;
	}

	final long pointerI = segI.getLong(segmentOffsetI) & POINTER_MASK;
	final long pointerJ = segJ.getLong(segmentOffsetJ) & POINTER_MASK;

	return compareRecords(pointerI, pointerJ);
}
 
Developer: axbaretto, Project: flink, Lines of code: 17, Source: NormalizedKeySorter.java

Example 3: buildBloomFilterForExtraOverflowSegments

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
private void buildBloomFilterForExtraOverflowSegments(int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) {
	int totalCount = 0;
	boolean skip = false;
	long forwardPointer = bucket.getLong(bucketInSegmentPos + HEADER_FORWARD_OFFSET);
	while (forwardPointer != BUCKET_FORWARD_POINTER_NOT_SET) {
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		if (overflowSegNum < 0 || overflowSegNum >= p.numOverflowSegments) {
			skip = true;
			break;
		}
		MemorySegment overflowSegment = p.overflowSegments[overflowSegNum];
		int bucketInOverflowSegmentOffset = (int) forwardPointer;
		
		final int count = overflowSegment.getShort(bucketInOverflowSegmentOffset + HEADER_COUNT_OFFSET);
		totalCount += count;
		// The bloom filter per bucket has 112 * 8 bits. When the expected number of input entries is greater than
		// 2048, the fpp would be higher than 0.9, which would make the bloom filter an overhead rather than an optimization.
		if (totalCount > 2048) {
			skip = true;
			break;
		}
		
		for (int i = 0; i < count; i++) {
			int hashCode = overflowSegment.getInt(bucketInOverflowSegmentOffset + BUCKET_HEADER_LENGTH + i * HASH_CODE_LEN);
			this.bloomFilter.addHash(hashCode);
		}
		
		forwardPointer = overflowSegment.getLong(bucketInOverflowSegmentOffset + HEADER_FORWARD_OFFSET);
		
	}
	
	if (!skip) {
		bucket.put(bucketInSegmentPos + HEADER_STATUS_OFFSET, BUCKET_STATUS_IN_FILTER);
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 36, Source: MutableHashTable.java
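
The sizing comment in Example 3 can be checked with the standard bloom filter estimate fpp = (1 - e^(-k*n/m))^k. A hedged worked example with the numbers from the comment (m = 112 * 8 bits, n = 2048 entries; the choice of k is an assumption, using the usual optimum rounded up to at least 1):

public class BloomFppCheck {
	public static void main(String[] args) {
		int m = 112 * 8; // bits per bucket, from the comment above
		int n = 2048;    // expected input entries
		// Assumed optimal number of hash functions: k = max(1, round(m/n * ln 2)).
		int k = Math.max(1, (int) Math.round((double) m / n * Math.log(2)));
		// Standard estimate of the false positive probability.
		double fpp = Math.pow(1 - Math.exp(-(double) k * n / m), k);
		System.out.printf("k = %d, fpp = %.3f%n", k, fpp); // ~0.898, matching the ~0.9 in the comment
	}
}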

Example 4: rebuild

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
/** Same as above, but the number of bucket segments of the new table can be specified. */
private void rebuild(long newNumBucketSegments) throws IOException {
	// Get new bucket segments
	releaseBucketSegments();
	allocateBucketSegments((int)newNumBucketSegments);

	T record = buildSideSerializer.createInstance();
	try {
		EntryIterator iter = getEntryIterator();
		recordArea.resetAppendPosition();
		recordArea.setWritePosition(0);
		while ((record = iter.next(record)) != null && !closed) {
			final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
			final int bucket = hashCode & numBucketsMask;
			final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
			final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
			final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
			final long firstPointer = bucketSegment.getLong(bucketOffset);

			long ptrToAppended = recordArea.noSeekAppendPointerAndRecord(firstPointer, record);
			bucketSegment.putLong(bucketOffset, ptrToAppended);
		}
		recordArea.freeSegmentsAfterAppendPosition();
		holes = 0;

	} catch (EOFException ex) {
		throw new RuntimeException("Bug in InPlaceMutableHashTable: we shouldn't get out of memory during a rebuild, " +
			"because we aren't allocating any new memory.");
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 31, Source: InPlaceMutableHashTable.java

Example 5: getMatchFor

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
/**
 * Searches the hash table for the record with the given key.
 * (If there would be multiple matches, only one is returned.)
 * @param record The record whose key we are searching for
 * @param targetForMatch If a match is found, it will be written here
 * @return targetForMatch if a match is found, otherwise null.
 */
@Override
public T getMatchFor(PT record, T targetForMatch) {
	if (closed) {
		return null;
	}

	final int hashCode = MathUtils.jenkinsHash(probeTypeComparator.hash(record));
	final int bucket = hashCode & numBucketsMask;
	bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
	final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
	bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment

	curElemPtr = bucketSegment.getLong(bucketOffset);

	pairComparator.setReference(record);

	T currentRecordInList = targetForMatch;

	prevElemPtr = INVALID_PREV_POINTER;
	try {
		while (curElemPtr != END_OF_LIST && !closed) {
			recordArea.setReadPosition(curElemPtr);
			nextPtr = recordArea.readPointer();

			currentRecordInList = recordArea.readRecord(currentRecordInList);
			recordEnd = recordArea.getReadPosition();
			if (pairComparator.equalToReference(currentRecordInList)) {
				// we found an element with a matching key, and not just a hash collision
				return currentRecordInList;
			}

			prevElemPtr = curElemPtr;
			curElemPtr = nextPtr;
		}
	} catch (IOException ex) {
		throw new RuntimeException("Error deserializing record from the hashtable: " + ex.getMessage(), ex);
	}
	return null;
}
 
Developer: axbaretto, Project: flink, Lines of code: 47, Source: InPlaceMutableHashTable.java
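
Example 5 implicitly documents the record area's entry layout: appendPointerAndRecord writes an 8-byte "next" pointer followed by the serialized record, and getMatchFor walks that chain until END_OF_LIST. A minimal traversal sketch over a plain ByteBuffer (the layout and the -1 sentinel are assumptions inferred from the code above, not Flink's actual RecordArea):

import java.nio.ByteBuffer;

public class ChainWalk {
	static final long END_OF_LIST = -1L; // assumed sentinel

	// Counts the entries in a chain whose entries each start with an 8-byte next-pointer.
	static int chainLength(ByteBuffer recordArea, long headPtr) {
		int count = 0;
		for (long cur = headPtr; cur != END_OF_LIST; cur = recordArea.getLong((int) cur)) {
			count++; // the record payload follows the pointer and is skipped here
		}
		return count;
	}
}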

Example 6: compactPartition

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
/**
 * Compacts (garbage collects) a partition using the copy-compact strategy and the dedicated compaction partition.
 *
 * @param partitionNumber partition to compact
 * @throws IOException
 */
private void compactPartition(final int partitionNumber) throws IOException {
	// do nothing if table was closed, parameter is invalid or no garbage exists
	if (this.closed || partitionNumber >= this.partitions.size() || this.partitions.get(partitionNumber).isCompacted()) {
		return;
	}
	// release all segments owned by compaction partition
	this.compactionMemory.clearAllMemory(availableMemory);
	this.compactionMemory.allocateSegments(1);
	this.compactionMemory.pushDownPages();
	T tempHolder = this.buildSideSerializer.createInstance();
	final int numPartitions = this.partitions.size();
	InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
	MemorySegment[] overflowSegments = partition.overflowSegments;
	long pointer;
	int pointerOffset;
	int bucketOffset;
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	for (int i = 0, bucket = partitionNumber; i < this.buckets.length && bucket < this.numBuckets; i++) {
		MemorySegment segment = this.buckets[i];
		// go over all buckets in the segment belonging to the partition
		for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < this.numBuckets; k += numPartitions, bucket += numPartitions) {
			bucketOffset = k * HASH_BUCKET_SIZE;
			if ((int) segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
				throw new IOException("Accessed wrong bucket! wanted: " + partitionNumber + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
			}
			// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
			int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
			int numInSegment = 0;
			pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
			while (true) {
				while (numInSegment < countInSegment) {
					pointer = segment.getLong(pointerOffset);
					tempHolder = partition.readRecordAt(pointer, tempHolder);
					pointer = this.compactionMemory.appendRecord(tempHolder);
					segment.putLong(pointerOffset, pointer);
					pointerOffset += POINTER_LEN;
					numInSegment++;
				}
				// this segment is done. check if there is another chained bucket
				final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
				if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
					break;
				}
				final int overflowSegNum = (int) (forwardPointer >>> 32);
				segment = overflowSegments[overflowSegNum];
				bucketOffset = (int) forwardPointer;
				countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
				pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
				numInSegment = 0;
			}
			segment = this.buckets[i];
		}
	}
	// swap partition with compaction partition
	this.compactionMemory.setPartitionNumber(partitionNumber);
	this.partitions.add(partitionNumber, compactionMemory);
	this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
	this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
	this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
	this.partitions.get(partitionNumber).setIsCompacted(true);
	//this.partitions.get(partitionNumber).pushDownPages();
	this.compactionMemory = partition;
	this.compactionMemory.resetRecordCounter();
	this.compactionMemory.setPartitionNumber(-1);
	this.compactionMemory.overflowSegments = null;
	this.compactionMemory.numOverflowSegments = 0;
	this.compactionMemory.nextOverflowBucket = 0;
	// try to allocate maximum segment count
	this.compactionMemory.clearAllMemory(this.availableMemory);
	int maxSegmentNumber = this.getMaxPartition();
	this.compactionMemory.allocateSegments(maxSegmentNumber);
	this.compactionMemory.resetRWViews();
	this.compactionMemory.pushDownPages();
}
 
Developer: axbaretto, Project: flink, Lines of code: 81, Source: CompactingHashTable.java

Example 7: fillCache

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
/**
 * Utility function that inserts all entries from a bucket and its overflow buckets into the cache.
 *
 * @return true if the last bucket was not reached yet
 * @throws IOException
 */
private boolean fillCache() throws IOException {
	if (currentBucketIndex >= table.numBuckets) {
		return false;
	}
	MemorySegment bucket = table.buckets[currentSegmentIndex];
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(currentBucketOffset + HEADER_PARTITION_OFFSET);
	final InMemoryPartition<T> partition = table.partitions.get(partitionNumber);
	final MemorySegment[] overflowSegments = partition.overflowSegments;
	
	int countInSegment = bucket.getInt(currentBucketOffset + HEADER_COUNT_OFFSET);
	int numInSegment = 0;
	int posInSegment = currentBucketOffset + BUCKET_POINTER_START_OFFSET;
	int bucketOffset = currentBucketOffset;

	// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
	while (true) {
		while (numInSegment < countInSegment) {
			long pointer = bucket.getLong(posInSegment);
			posInSegment += POINTER_LEN;
			numInSegment++;
			T target = table.buildSideSerializer.createInstance();
			try {
				target = partition.readRecordAt(pointer, target);
				cache.add(target);
			} catch (IOException e) {
				throw new RuntimeException("Error deserializing record from the Hash Table: " + e.getMessage(), e);
			}
		}
		// this segment is done. check if there is another chained bucket
		final long forwardPointer = bucket.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
		if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
			break;
		}
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		bucket = overflowSegments[overflowSegNum];
		bucketOffset = (int) forwardPointer;
		countInSegment = bucket.getInt(bucketOffset + HEADER_COUNT_OFFSET);
		posInSegment = bucketOffset + BUCKET_POINTER_START_OFFSET;
		numInSegment = 0;
	}
	currentBucketIndex++;
	if (currentBucketIndex % bucketsPerSegment == 0) {
		currentSegmentIndex++;
		currentBucketOffset = 0;
	} else {
		currentBucketOffset += HASH_BUCKET_SIZE;
	}
	return true;
}
 
Developer: axbaretto, Project: flink, Lines of code: 57, Source: CompactingHashTable.java
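
Examples 3, 6, 7, and 8 all decode overflow-bucket forward pointers the same way: the upper 32 bits hold the overflow segment number and the lower 32 bits hold the byte offset of the chained bucket inside that segment. A small sketch of this packing scheme (the helper names are hypothetical, not part of Flink's API):

public class ForwardPointers {
	static long pack(int overflowSegNum, int bucketOffset) {
		return ((long) overflowSegNum << 32) | (bucketOffset & 0xFFFFFFFFL);
	}

	static int segmentNumber(long forwardPointer) {
		return (int) (forwardPointer >>> 32); // upper 32 bits
	}

	static int bucketOffset(long forwardPointer) {
		return (int) forwardPointer;          // lower 32 bits
	}
}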

Example 8: getMatchFor

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
public T getMatchFor(PT probeSideRecord, T reuse) {
	if (closed) {
		return null;
	}
	final int searchHashCode = MathUtils.jenkinsHash(this.probeTypeComparator.hash(probeSideRecord));
	
	final int posHashCode = searchHashCode % numBuckets;
	
	// get the bucket for the given hash code
	MemorySegment bucket = buckets[posHashCode >> bucketsPerSegmentBits];
	int bucketInSegmentOffset = (posHashCode & bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
	final InMemoryPartition<T> p = partitions.get(partitionNumber);
	final MemorySegment[] overflowSegments = p.overflowSegments;
	
	this.pairComparator.setReference(probeSideRecord);
	
	int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
	int numInSegment = 0;
	int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;

	// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
	while (true) {
		
		while (numInSegment < countInSegment) {
			
			final int thisCode = bucket.getInt(posInSegment);
			posInSegment += HASH_CODE_LEN;
				
			// check if the hash code matches
			if (thisCode == searchHashCode) {
				// get the pointer to the pair
				final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (numInSegment * POINTER_LEN);
				final long pointer = bucket.getLong(pointerOffset);
				numInSegment++;
				
				// deserialize the key to check whether it is really equal, or whether we had only a hash collision
				try {
					reuse = p.readRecordAt(pointer, reuse);
					
					if (this.pairComparator.equalToReference(reuse)) {
						this.partition = p;
						this.bucket = bucket;
						this.pointerOffsetInBucket = pointerOffset;
						return reuse;
					}
				}
				catch (IOException e) {
					throw new RuntimeException("Error deserializing record from the hashtable: " + e.getMessage(), e);
				}
			}
			else {
				numInSegment++;
			}
		}
		
		// this segment is done. check if there is another chained bucket
		final long forwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
		if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
			return null;
		}
		
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		bucket = overflowSegments[overflowSegNum];
		bucketInSegmentOffset = (int) forwardPointer;
		countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
		posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
		numInSegment = 0;
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 73, Source: CompactingHashTable.java
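
Note the addressing difference between the two tables: CompactingHashTable in Example 8 maps a hash code to a bucket with searchHashCode % numBuckets, while InPlaceMutableHashTable in Examples 1 and 4 uses hashCode & numBucketsMask. The two agree only when the bucket count is a power of two, as this assumed demo illustrates:

int numBuckets = 1 << 10;            // 1024 buckets, a power of two
int numBucketsMask = numBuckets - 1; // 0x3FF

int hashCode = 123456789;            // any non-negative hash code (jenkinsHash is non-negative)
int byModulo = hashCode % numBuckets;
int byMask = hashCode & numBucketsMask;
assert byModulo == byMask;           // holds exactly because numBuckets is a power of two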

Example 9: writeToOutput

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
@Override
public void writeToOutput(ChannelWriterOutputView output, LargeRecordHandler<T> largeRecordsOutput)
		throws IOException
{
	if (LOG.isDebugEnabled()) {
		if (largeRecordsOutput == null) {
			LOG.debug("Spilling sort buffer without large record handling.");
		} else {
			LOG.debug("Spilling sort buffer with large record handling.");
		}
	}
	
	final int numRecords = this.numRecords;
	int currentMemSeg = 0;
	int currentRecord = 0;
	
	while (currentRecord < numRecords) {
		final MemorySegment currentIndexSegment = this.sortIndex.get(currentMemSeg++);

		// go through all records in the memory segment
		for (int offset = 0; currentRecord < numRecords && offset <= this.lastIndexEntryOffset; currentRecord++, offset += this.indexEntrySize) {
			final long pointer = currentIndexSegment.getLong(offset);
			
			// small records go into the regular spill file, large records into the special code path
			if (pointer >= 0 || largeRecordsOutput == null) {
				this.recordBuffer.setReadPosition(pointer);
				this.serializer.copy(this.recordBuffer, output);
			}
			else {
				
				if (LOG.isDebugEnabled()) {
					LOG.debug("Spilling large record to large record fetch file.");
				}
				
				this.recordBuffer.setReadPosition(pointer & POINTER_MASK);
				T record = this.serializer.deserialize(this.recordBuffer);
				largeRecordsOutput.addRecord(record);
			}
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 42, Source: NormalizedKeySorter.java
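
In Example 9, the sign bit of an index-entry pointer evidently flags large records: a pointer >= 0 takes the regular spill path, while a negative pointer is masked with POINTER_MASK and routed to the large-record handler. A hedged sketch of that convention (the flag and mask values are assumptions consistent with a sign-bit flag):

public class LargeRecordPointers {
	static final long LARGE_RECORD_FLAG = Long.MIN_VALUE; // bit 63, assumed
	static final long POINTER_MASK = Long.MAX_VALUE;      // lower 63 bits, assumed

	static long markLarge(long offset) {
		return offset | LARGE_RECORD_FLAG; // the pointer becomes negative
	}

	static boolean isLarge(long pointer) {
		return pointer < 0;                // matches the "pointer >= 0" check above
	}

	static long offsetOf(long pointer) {
		return pointer & POINTER_MASK;     // strip the flag bit
	}
}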


Note: The org.apache.flink.core.memory.MemorySegment.getLong method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright of the source code remains with the original authors. Before redistributing or using the code, please consult the License of the corresponding project. Do not reproduce this article without permission.