

Java MemorySegment.putLong Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.core.memory.MemorySegment.putLong. If you are wondering how MemorySegment.putLong is used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.core.memory.MemorySegment.


The following presents seven code examples of the MemorySegment.putLong method, sorted by popularity by default.
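
Before diving into the examples, here is a minimal self-contained sketch of what the method does: putLong writes an 8-byte long value at a given byte offset of a MemorySegment, and getLong reads it back. The allocation call MemorySegmentFactory.allocateUnpooledSegment is an assumption about the Flink version at hand and is not part of the examples below.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class PutLongDemo {
	public static void main(String[] args) {
		// Allocate a small heap-backed segment (assumed factory method; adjust to your Flink version).
		MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(4096);

		// putLong writes an 8-byte value at the given byte offset.
		segment.putLong(0, 42L);
		segment.putLong(8, System.currentTimeMillis());

		// getLong reads the value back from the same offset.
		System.out.println(segment.getLong(0)); // prints 42
	}
}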

Example 1: insert

import org.apache.flink.core.memory.MemorySegment; // import the package/class that this method depends on
/**
 * Inserts the given record into the hash table.
 * Note: this method doesn't care about whether a record with the same key is already present.
 * @param record The record to insert.
 * @throws IOException (EOFException specifically, if memory ran out)
 */
@Override
public void insert(T record) throws IOException {
	if (closed) {
		return;
	}

	final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
	final int bucket = hashCode & numBucketsMask;
	final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
	final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
	final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
	final long firstPointer = bucketSegment.getLong(bucketOffset);

	try {
		final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record);
		bucketSegment.putLong(bucketOffset, newFirstPointer);
	} catch (EOFException ex) {
		compactOrThrow();
		insert(record);
		return;
	}

	numElements++;
	resizeTableIfNecessary();
}
 
Author ID: axbaretto, Project: flink, Lines: 32, Source: InPlaceMutableHashTable.java
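
The insert method above uses putLong to store the new head of the bucket's linked list: appendPointerAndRecord writes the previous head pointer in front of the record and returns the address of the new entry, which then becomes the bucket's head. The bucket itself is located purely with bit arithmetic. The following self-contained sketch walks through that addressing step; the mask and shift constants are hypothetical stand-ins, not the values Flink derives from its memory configuration.

public class BucketAddressingSketch {
	public static void main(String[] args) {
		// Hypothetical layout: 2048 buckets of 16 bytes per segment, 65536 buckets in total.
		final int numBucketsPerSegmentBits = 11;                        // log2(2048)
		final int numBucketsPerSegmentMask = (1 << numBucketsPerSegmentBits) - 1;
		final int bucketSizeBits = 4;                                   // log2(16)
		final int numBucketsMask = (1 << 16) - 1;

		int hashCode = 0x9E3779B9;                                      // stand-in for MathUtils.jenkinsHash(...)
		int bucket = hashCode & numBucketsMask;                         // bucket index
		int segmentIndex = bucket >>> numBucketsPerSegmentBits;         // which segment contains the bucket
		int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // byte offset within that segment

		System.out.println("segment " + segmentIndex + ", offset " + bucketOffset);
	}
}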

Example 2: initTable

import org.apache.flink.core.memory.MemorySegment; // import the package/class that this method depends on
private void initTable(int numBuckets, byte numPartitions) {
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ( (numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
	final MemorySegment[] table = new MemorySegment[numSegs];
	
	// go over all segments that are part of the table
	for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
		final MemorySegment seg = getNextBuffer();
		
		// go over all buckets in the segment
		for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
			final int bucketOffset = k * HASH_BUCKET_SIZE;	
			
			// compute the partition that the bucket corresponds to
			final byte partition = assignPartition(bucket, numPartitions);
			
			// initialize the header fields
			seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
			seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
			seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
		}
		
		table[i] = seg;
	}
	this.buckets = table;
	this.numBuckets = numBuckets;
}
 
Author ID: axbaretto, Project: flink, Lines: 28, Source: CompactingHashTable.java

Example 3: initTable

import org.apache.flink.core.memory.MemorySegment; // import the package/class that this method depends on
protected void initTable(int numBuckets, byte numPartitions) {
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ( (numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
	final MemorySegment[] table = new MemorySegment[numSegs];
	
	ensureNumBuffersReturned(numSegs);
	
	// go over all segments that are part of the table
	for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
		final MemorySegment seg = getNextBuffer();
		
		// go over all buckets in the segment
		for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
			final int bucketOffset = k * HASH_BUCKET_SIZE;	
			
			// compute the partition that the bucket corresponds to
			final byte partition = assignPartition(bucket, numPartitions);
			
			// initialize the header fields
			seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
			seg.put(bucketOffset + HEADER_STATUS_OFFSET, BUCKET_STATUS_IN_MEMORY);
			seg.putShort(bucketOffset + HEADER_COUNT_OFFSET, (short) 0);
			seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
			seg.putShort(bucketOffset + HEADER_PROBED_FLAGS_OFFSET, (short) 0);
		}
		
		table[i] = seg;
	}
	this.buckets = table;
	this.numBuckets = numBuckets;
	
	if (useBloomFilters) {
		initBloomFilter(numBuckets);
	}
}
 
Author ID: axbaretto, Project: flink, Lines: 36, Source: MutableHashTable.java
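
Both initTable variants write the bucket header fields at fixed offsets inside each bucket. The sketch below spells out one plausible layout with hypothetical offset constants (they mirror the offsets +0, +1, +2 and +4 that the test in Example 7 writes, but the real values are private constants of the hash tables), and it assumes MemorySegmentFactory.allocateUnpooledSegment is available in the Flink version at hand.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class BucketHeaderSketch {
	// Hypothetical header offsets, for illustration only.
	static final int HEADER_PARTITION_OFFSET = 0; // 1 byte : partition number
	static final int HEADER_STATUS_OFFSET = 1;    // 1 byte : in-memory / spilled status
	static final int HEADER_COUNT_OFFSET = 2;     // 2 bytes: entry count (short)
	static final int HEADER_FORWARD_OFFSET = 4;   // 8 bytes: forward pointer to an overflow bucket (long)
	static final long BUCKET_FORWARD_POINTER_NOT_SET = ~0x0L;

	public static void main(String[] args) {
		MemorySegment seg = MemorySegmentFactory.allocateUnpooledSegment(4096); // assumed factory method
		int bucketOffset = 0;
		seg.put(bucketOffset + HEADER_PARTITION_OFFSET, (byte) 3);
		seg.put(bucketOffset + HEADER_STATUS_OFFSET, (byte) 0);
		seg.putShort(bucketOffset + HEADER_COUNT_OFFSET, (short) 0);
		seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
	}
}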

Example 4: rebuild

import org.apache.flink.core.memory.MemorySegment; // import the package/class that this method depends on
/** Same as above, but the number of bucket segments of the new table can be specified. */
private void rebuild(long newNumBucketSegments) throws IOException {
	// Get new bucket segments
	releaseBucketSegments();
	allocateBucketSegments((int)newNumBucketSegments);

	T record = buildSideSerializer.createInstance();
	try {
		EntryIterator iter = getEntryIterator();
		recordArea.resetAppendPosition();
		recordArea.setWritePosition(0);
		while ((record = iter.next(record)) != null && !closed) {
			final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
			final int bucket = hashCode & numBucketsMask;
			final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
			final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
			final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
			final long firstPointer = bucketSegment.getLong(bucketOffset);

			long ptrToAppended = recordArea.noSeekAppendPointerAndRecord(firstPointer, record);
			bucketSegment.putLong(bucketOffset, ptrToAppended);
		}
		recordArea.freeSegmentsAfterAppendPosition();
		holes = 0;

	} catch (EOFException ex) {
		throw new RuntimeException("Bug in InPlaceMutableHashTable: we shouldn't get out of memory during a rebuild, " +
			"because we aren't allocating any new memory.");
	}
}
 
Author ID: axbaretto, Project: flink, Lines: 31, Source: InPlaceMutableHashTable.java

Example 5: insertBucketEntryFromSearch

import org.apache.flink.core.memory.MemorySegment; // import the package/class that this method depends on
/**
 * IMPORTANT!!! We pass only the partition number, because we must make sure we get a fresh
 * partition reference. The partition reference used during search for the key may have become
 * invalid during the compaction.
 */
private void insertBucketEntryFromSearch(MemorySegment originalBucket, MemorySegment currentBucket,
											int originalBucketOffset, int currentBucketOffset,
											int countInCurrentBucket, long originalForwardPointer,
											int hashCode, long pointer, int partitionNumber) throws IOException {
	boolean checkForResize = false;
	if (countInCurrentBucket < NUM_ENTRIES_PER_BUCKET) {
		// we are good in our current bucket, put the values
		currentBucket.putInt(currentBucketOffset + BUCKET_HEADER_LENGTH + (countInCurrentBucket * HASH_CODE_LEN), hashCode); // hash code
		currentBucket.putLong(currentBucketOffset + BUCKET_POINTER_START_OFFSET + (countInCurrentBucket * POINTER_LEN), pointer); // pointer
		currentBucket.putInt(currentBucketOffset + HEADER_COUNT_OFFSET, countInCurrentBucket + 1); // update count
	}
	else {
		// we go to a new overflow bucket
		final InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
		MemorySegment overflowSeg;
		final int overflowSegmentNum;
		final int overflowBucketOffset;
		
		// first, see if there is space for an overflow bucket remaining in the last overflow segment
		if (partition.nextOverflowBucket == 0) {
			// no space left in last bucket, or no bucket yet, so create an overflow segment
			overflowSeg = getNextBuffer();
			overflowBucketOffset = 0;
			overflowSegmentNum = partition.numOverflowSegments;
			
			// add the new overflow segment
			if (partition.overflowSegments.length <= partition.numOverflowSegments) {
				MemorySegment[] newSegsArray = new MemorySegment[partition.overflowSegments.length * 2];
				System.arraycopy(partition.overflowSegments, 0, newSegsArray, 0, partition.overflowSegments.length);
				partition.overflowSegments = newSegsArray;
			}
			partition.overflowSegments[partition.numOverflowSegments] = overflowSeg;
			partition.numOverflowSegments++;
			checkForResize = true;
		}
		else {
			// there is space in the last overflow segment
			overflowSegmentNum = partition.numOverflowSegments - 1;
			overflowSeg = partition.overflowSegments[overflowSegmentNum];
			overflowBucketOffset = partition.nextOverflowBucket << NUM_INTRA_BUCKET_BITS;
		}
		
		// next overflow bucket is one ahead. if the segment is full, the next will be at the beginning
		// of a new segment
		partition.nextOverflowBucket = (partition.nextOverflowBucket == this.bucketsPerSegmentMask ? 0 : partition.nextOverflowBucket + 1);
		
		// insert the new overflow bucket in the chain of buckets
		
		// 1) set the old forward pointer
		// 2) let the bucket in the main table point to this one
		overflowSeg.putLong(overflowBucketOffset + HEADER_FORWARD_OFFSET, originalForwardPointer);
		final long pointerToNewBucket = (((long) overflowSegmentNum) << 32) | ((long) overflowBucketOffset);
		originalBucket.putLong(originalBucketOffset + HEADER_FORWARD_OFFSET, pointerToNewBucket);
		
		// finally, insert the values into the overflow buckets
		overflowSeg.putInt(overflowBucketOffset + BUCKET_HEADER_LENGTH, hashCode);	// hash code
		overflowSeg.putLong(overflowBucketOffset + BUCKET_POINTER_START_OFFSET, pointer); // pointer
		
		// set the count to one
		overflowSeg.putInt(overflowBucketOffset + HEADER_COUNT_OFFSET, 1);
		if(checkForResize && !this.isResizing) {
			// check if we should resize buckets
			if(this.buckets.length <= getOverflowSegmentCount()) {
				resizeHashTable();
			}
		}
	}
}
 
Author ID: axbaretto, Project: flink, Lines: 74, Source: CompactingHashTable.java
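
The forward pointer that putLong stores above packs two values into a single long: the overflow segment number in the upper 32 bits and the byte offset of the overflow bucket in the lower 32 bits; compactPartition in the next example decodes it the same way. A tiny self-contained sketch of this encode/decode pair:

public class ForwardPointerSketch {
	// Pack an overflow segment number and a bucket byte offset into one long, as in the example above.
	static long encode(int overflowSegmentNum, int overflowBucketOffset) {
		return (((long) overflowSegmentNum) << 32) | ((long) overflowBucketOffset);
	}

	public static void main(String[] args) {
		long pointer = encode(3, 128);
		int overflowSegNum = (int) (pointer >>> 32); // 3   (how the chain is followed)
		int bucketOffset = (int) pointer;            // 128 (offset within that overflow segment)
		System.out.println(overflowSegNum + " / " + bucketOffset);
	}
}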

Example 6: compactPartition

import org.apache.flink.core.memory.MemorySegment; // import the package/class that this method depends on
/**
 * Compacts (garbage collects) partition with copy-compact strategy using compaction partition
 * 
 * @param partitionNumber partition to compact
 * @throws IOException 
 */
private void compactPartition(final int partitionNumber) throws IOException {
	// do nothing if table was closed, parameter is invalid or no garbage exists
	if (this.closed || partitionNumber >= this.partitions.size() || this.partitions.get(partitionNumber).isCompacted()) {
		return;
	}
	// release all segments owned by compaction partition
	this.compactionMemory.clearAllMemory(availableMemory);
	this.compactionMemory.allocateSegments(1);
	this.compactionMemory.pushDownPages();
	T tempHolder = this.buildSideSerializer.createInstance();
	final int numPartitions = this.partitions.size();
	InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
	MemorySegment[] overflowSegments = partition.overflowSegments;
	long pointer;
	int pointerOffset;
	int bucketOffset;
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	for (int i = 0, bucket = partitionNumber; i < this.buckets.length && bucket < this.numBuckets; i++) {
		MemorySegment segment = this.buckets[i];
		// go over all buckets in the segment belonging to the partition
		for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < this.numBuckets; k += numPartitions, bucket += numPartitions) {
			bucketOffset = k * HASH_BUCKET_SIZE;
			if((int)segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
				throw new IOException("Accessed wrong bucket! wanted: " + partitionNumber + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
			}
			// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
			int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
			int numInSegment = 0;
			pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
			while (true) {
				while (numInSegment < countInSegment) {
					pointer = segment.getLong(pointerOffset);
					tempHolder = partition.readRecordAt(pointer, tempHolder);
					pointer = this.compactionMemory.appendRecord(tempHolder);
					segment.putLong(pointerOffset, pointer);
					pointerOffset += POINTER_LEN;
					numInSegment++;
				}
				// this segment is done. check if there is another chained bucket
				final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
				if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
					break;
				}
				final int overflowSegNum = (int) (forwardPointer >>> 32);
				segment = overflowSegments[overflowSegNum];
				bucketOffset = (int) forwardPointer;
				countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
				pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
				numInSegment = 0;
			}
			segment = this.buckets[i];
		}
	}
	// swap partition with compaction partition
	this.compactionMemory.setPartitionNumber(partitionNumber);
	this.partitions.add(partitionNumber, compactionMemory);
	this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
	this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
	this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
	this.partitions.get(partitionNumber).setIsCompacted(true);
	//this.partitions.get(partitionNumber).pushDownPages();
	this.compactionMemory = partition;
	this.compactionMemory.resetRecordCounter();
	this.compactionMemory.setPartitionNumber(-1);
	this.compactionMemory.overflowSegments = null;
	this.compactionMemory.numOverflowSegments = 0;
	this.compactionMemory.nextOverflowBucket = 0;
	// try to allocate maximum segment count
	this.compactionMemory.clearAllMemory(this.availableMemory);
	int maxSegmentNumber = this.getMaxPartition();
	this.compactionMemory.allocateSegments(maxSegmentNumber);
	this.compactionMemory.resetRWViews();
	this.compactionMemory.pushDownPages();
}
 
Author ID: axbaretto, Project: flink, Lines: 81, Source: CompactingHashTable.java
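
The core of compactPartition is its inner loop: read each live record at its old pointer, append it to the compaction area, and overwrite the 8-byte pointer slot in the bucket with putLong. The sketch below isolates that per-bucket step; the RecordStore interface is a hypothetical stand-in for Flink's InMemoryPartition, not its actual API.

import java.io.IOException;

import org.apache.flink.core.memory.MemorySegment;

public class CopyCompactSketch {

	/** Hypothetical record store: read a record at a pointer, or append a record and return its new pointer. */
	interface RecordStore<T> {
		T readAt(long pointer, T reuse) throws IOException;
		long append(T record) throws IOException;
	}

	/**
	 * Rewrites the pointer slots of one bucket: every live record is copied into the
	 * compaction store and its slot is overwritten in place via putLong.
	 * The slot offsets and 8-byte pointer width are assumptions, not Flink's actual bucket layout.
	 */
	static <T> void compactBucket(MemorySegment bucket, int firstPointerOffset, int count,
			RecordStore<T> oldStore, RecordStore<T> compactionStore, T reuse) throws IOException {
		int pointerOffset = firstPointerOffset;
		for (int i = 0; i < count; i++) {
			long oldPointer = bucket.getLong(pointerOffset);
			reuse = oldStore.readAt(oldPointer, reuse);
			long newPointer = compactionStore.append(reuse);
			bucket.putLong(pointerOffset, newPointer); // the bucket now points at the compacted copy
			pointerOffset += 8;                        // pointers are 8-byte longs
		}
	}
}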

Example 7: testBucketsNotFulfillSegment

import org.apache.flink.core.memory.MemorySegment; // import the package/class that this method depends on
@Test
public void testBucketsNotFulfillSegment() throws Exception {
	final int NUM_KEYS = 10000;
	final int BUILD_VALS_PER_KEY = 3;
	final int PROBE_VALS_PER_KEY = 10;

	// create a build input that gives 30000 pairs with 3 values sharing the same key
	MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

	// create a probe input that gives 100000 pairs with 10 values sharing a key
	MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

	// allocate the memory for the HashTable
	List<MemorySegment> memSegments;
	try {
		// 33 is the minimum number of pages required to perform the hash join on these inputs
		memSegments = this.memManager.allocatePages(MEM_OWNER, 33);
	}
	catch (MemoryAllocationException maex) {
		fail("Memory for the Join could not be provided.");
		return;
	}

	// For FLINK-2545, the bucket data may not fill its buffer; for example, the buffer may contain 256 buckets
	// while the hash table only assigns 250 buckets to it. The unused buffer bytes may contain arbitrary data,
	// which can corrupt the hash table if it forgets to skip them. To mock this, put invalid bucket data
	// (partition=1, inMemory=true, count=-1) at the end of each buffer.
	for (MemorySegment segment : memSegments) {
		int newBucketOffset = segment.size() - 128;
		// initialize the header fields
		segment.put(newBucketOffset + 0, (byte)0);
		segment.put(newBucketOffset + 1, (byte)0);
		segment.putShort(newBucketOffset + 2, (short) -1);
		segment.putLong(newBucketOffset + 4, ~0x0L);
	}

	// ----------------------------------------------------------------------------------------

	final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
		this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
		this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
		memSegments, ioManager);
	join.open(buildInput, probeInput);

	final IntPair recordReuse = new IntPair();
	int numRecordsInJoinResult = 0;

	while (join.nextRecord()) {
		MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
		while (buildSide.next(recordReuse) != null) {
			numRecordsInJoinResult++;
		}
	}
	Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

	join.close();
	this.memManager.release(join.getFreedMemory());
}
 
Author ID: axbaretto, Project: flink, Lines: 59, Source: HashTableITCase.java


Note: The org.apache.flink.core.memory.MemorySegment.putLong examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.