

Java MemorySegment.getInt Method Code Examples

This article collects and summarizes typical usage examples of the org.apache.flink.core.memory.MemorySegment.getInt method in Java. If you are wondering what MemorySegment.getInt does, how to use it, or where to find examples, the curated code samples below should help. You can also explore further usage examples of org.apache.flink.core.memory.MemorySegment, the class this method belongs to.


The following presents 10 code examples of the MemorySegment.getInt method, sorted by popularity by default.
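
Before the examples, a minimal, self-contained sketch of the method's basic contract may help: getInt(index) reads the 4-byte int stored at the given byte offset, as the counterpart of putInt. The class name GetIntDemo is illustrative; the MemorySegment and MemorySegmentFactory calls are the same ones used in the examples below.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class GetIntDemo {

	public static void main(String[] args) {
		// allocate a small unpooled segment (size in bytes)
		MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(16);

		// write two ints at byte offsets 0 and 4 (an int occupies 4 bytes)
		segment.putInt(0, 42);
		segment.putInt(4, 7);

		// read them back: getInt takes the byte offset of the value
		System.out.println(segment.getInt(0)); // prints 42
		System.out.println(segment.getInt(4)); // prints 7
	}
}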

Example 1: ReadEnd

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
private ReadEnd(MemorySegment firstMemSegment, LinkedBlockingQueue<MemorySegment> emptyBufferTarget,
								Deque<MemorySegment> fullBufferSource, BlockChannelReader<MemorySegment> spilledBufferSource,
								List<MemorySegment> emptyBuffers, int numBuffersSpilled)
	throws IOException {
	super(firstMemSegment, firstMemSegment.getInt(0), HEADER_LENGTH);

	this.emptyBufferTarget = emptyBufferTarget;
	this.fullBufferSource = fullBufferSource;

	this.spilledBufferSource = spilledBufferSource;

	requestsRemaining = numBuffersSpilled;
	this.spilledBuffersRemaining = numBuffersSpilled;

	// send the first requests
	while (requestsRemaining > 0 && emptyBuffers.size() > 0) {
		this.spilledBufferSource.readBlock(emptyBuffers.remove(emptyBuffers.size() - 1));
		requestsRemaining--;
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 21, Source file: SerializedUpdateBuffer.java
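
The super(...) call passes firstMemSegment.getInt(0) as the read limit: the first int of each block serves as a header holding the number of valid bytes. A hedged sketch of the matching writer side, assuming that layout (finalizeBlock and the HEADER_LENGTH value are illustrative, not taken from the source):

import org.apache.flink.core.memory.MemorySegment;

// a minimal sketch, assuming the first 4 bytes of each block store the
// count of valid bytes; the helper name and constant are hypothetical
static final int HEADER_LENGTH = 4;

static void finalizeBlock(MemorySegment segment, int bytesWritten) {
	// stamp the limit into the header so the read end can later use
	// segment.getInt(0) as the limit for this block
	segment.putInt(0, bytesWritten);
}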

Example 2: buildBloomFilterForBucket

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
/**
 * Uses all of the bucket memory except the bucket header as the bit set of a bloom filter, and
 * populates it with the hash codes of the build-side records.
 */
final void buildBloomFilterForBucket(int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) {
	final int count = bucket.getShort(bucketInSegmentPos + HEADER_COUNT_OFFSET);
	if (count <= 0) {
		return;
	}

	int[] hashCodes = new int[count];
	// The hash codes and the bloom filter occupy the same bytes, so we read all hash codes out first and then write the bloom filter back over them.
	for (int i = 0; i < count; i++) {
		hashCodes[i] = bucket.getInt(bucketInSegmentPos + BUCKET_HEADER_LENGTH + i * HASH_CODE_LEN);
	}
	this.bloomFilter.setBitsLocation(bucket, bucketInSegmentPos + BUCKET_HEADER_LENGTH);
	for (int hashCode : hashCodes) {
		this.bloomFilter.addHash(hashCode);
	}
	buildBloomFilterForExtraOverflowSegments(bucketInSegmentPos, bucket, p);
}
 
Developer: axbaretto, Project: flink, Lines of code: 22, Source file: MutableHashTable.java
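
The loop above computes the byte offset of the i-th hash code relative to the bucket start. A minimal sketch of that address arithmetic, with constants assumed to match MutableHashTable (BUCKET_HEADER_LENGTH = 16 and HASH_CODE_LEN = 4 are assumptions here):

// illustrative constants; assumed, not copied from MutableHashTable
static final int BUCKET_HEADER_LENGTH = 16; // bytes of per-bucket metadata
static final int HASH_CODE_LEN = 4;         // each hash code is one int

static int hashCodeOffset(int bucketInSegmentPos, int i) {
	// the i-th hash code sits directly after the bucket header
	return bucketInSegmentPos + BUCKET_HEADER_LENGTH + i * HASH_CODE_LEN;
}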

Example 3: buildBloomFilterForExtraOverflowSegments

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
private void buildBloomFilterForExtraOverflowSegments(int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) {
	int totalCount = 0;
	boolean skip = false;
	long forwardPointer = bucket.getLong(bucketInSegmentPos + HEADER_FORWARD_OFFSET);
	while (forwardPointer != BUCKET_FORWARD_POINTER_NOT_SET) {
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		if (overflowSegNum < 0 || overflowSegNum >= p.numOverflowSegments) {
			skip = true;
			break;
		}
		MemorySegment overflowSegment = p.overflowSegments[overflowSegNum];
		int bucketInOverflowSegmentOffset = (int) forwardPointer;
		
		final int count = overflowSegment.getShort(bucketInOverflowSegmentOffset + HEADER_COUNT_OFFSET);
		totalCount += count;
		// The bloom filter per bucket has 112 * 8 bits; once the expected number of input entries
		// exceeds 2048, the false positive probability rises above 0.9, which makes the bloom
		// filter an overhead instead of an optimization.
		if (totalCount > 2048) {
			skip = true;
			break;
		}
		
		for (int i = 0; i < count; i++) {
			int hashCode = overflowSegment.getInt(bucketInOverflowSegmentOffset + BUCKET_HEADER_LENGTH + i * HASH_CODE_LEN);
			this.bloomFilter.addHash(hashCode);
		}
		
		forwardPointer = overflowSegment.getLong(bucketInOverflowSegmentOffset + HEADER_FORWARD_OFFSET);
		
	}
	
	if (!skip) {
		bucket.put(bucketInSegmentPos + HEADER_STATUS_OFFSET, BUCKET_STATUS_IN_FILTER);
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 36, Source file: MutableHashTable.java
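
This example, like several below, decodes the 64-bit forward pointer as an overflow segment number in the high 32 bits and a byte offset in the low 32 bits. A minimal sketch making that bit layout explicit (packForwardPointer and the accessors are hypothetical helpers):

// hypothetical helpers illustrating the assumed pointer layout
static long packForwardPointer(int overflowSegNum, int offsetInSegment) {
	return (((long) overflowSegNum) << 32) | (offsetInSegment & 0xFFFFFFFFL);
}

static int overflowSegmentNumber(long forwardPointer) {
	return (int) (forwardPointer >>> 32); // high 32 bits, as decoded above
}

static int offsetInOverflowSegment(long forwardPointer) {
	return (int) forwardPointer; // low 32 bits
}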

Example 4: verifyBufferFilledWithAscendingNumbers

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
static int verifyBufferFilledWithAscendingNumbers(Buffer buffer, int currentNumber) {
	MemorySegment segment = buffer.getMemorySegment();

	int size = buffer.getSize();

	for (int i = 0; i < size; i += 4) {
		if (segment.getInt(i) != currentNumber++) {
			throw new IllegalStateException("Read unexpected number from buffer.");
		}
	}

	return currentNumber;
}
 
Developer: axbaretto, Project: flink, Lines of code: 14, Source file: BufferFileWriterReaderTest.java
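
The natural counterpart fills a buffer with ascending ints at the same 4-byte stride. A hedged sketch (fillBufferWithAscendingNumbers is a hypothetical helper; the actual fill routine in the test class may differ):

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.io.network.buffer.Buffer;

// hypothetical fill-side counterpart of the verification above
static int fillBufferWithAscendingNumbers(Buffer buffer, int currentNumber) {
	MemorySegment segment = buffer.getMemorySegment();
	int size = buffer.getSize();

	for (int i = 0; i < size; i += 4) {
		segment.putInt(i, currentNumber++);
	}

	return currentNumber;
}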

Example 5: channelReadWriteOneSegment

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
@Test
public void channelReadWriteOneSegment() {
	final int NUM_IOS = 1111;
	
	try {
		final FileIOChannel.ID channelID = this.ioManager.createChannel();
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channelID);
		
		MemorySegment memSeg = MemorySegmentFactory.allocateUnpooledSegment(32 * 1024);
		
		for (int i = 0; i < NUM_IOS; i++) {
			for (int pos = 0; pos < memSeg.size(); pos += 4) {
				memSeg.putInt(pos, i);
			}
			
			writer.writeBlock(memSeg);
			memSeg = writer.getNextReturnedBlock();
		}
		
		writer.close();
		
		final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channelID);
		for (int i = 0; i < NUM_IOS; i++) {
			reader.readBlock(memSeg);
			memSeg = reader.getNextReturnedBlock();
			
			for (int pos = 0; pos < memSeg.size(); pos += 4) {
				if (memSeg.getInt(pos) != i) {
					fail("Read memory segment contains invalid data.");
				}
			}
		}
		
		reader.closeAndDelete();
	}
	catch (Exception ex) {
		ex.printStackTrace();
		fail("Test encountered an exception: " + ex.getMessage());
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 41, Source file: IOManagerAsyncTest.java
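
Note how the test re-acquires the segment after every write via writer.getNextReturnedBlock(): the asynchronous writer hands segments back through a return queue once the corresponding I/O request completes, so a single 32 KB segment is recycled across all 1111 write and read operations.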

Example 6: getLimitForSegment

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
@Override
protected int getLimitForSegment(MemorySegment segment) {
	return segment.getInt(0);
}
 
Developer: axbaretto, Project: flink, Lines of code: 5, Source file: SerializedUpdateBuffer.java
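
This is the read-side counterpart of the header convention seen in Example 1: the first int of each spilled block stores the number of valid bytes, which becomes the read limit for that segment.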

Example 7: compactPartition

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
/**
 * Compacts (garbage collects) a partition using a copy-compact strategy and the dedicated compaction partition.
 * 
 * @param partitionNumber partition to compact
 * @throws IOException 
 */
private void compactPartition(final int partitionNumber) throws IOException {
	// do nothing if table was closed, parameter is invalid or no garbage exists
	if (this.closed || partitionNumber >= this.partitions.size() || this.partitions.get(partitionNumber).isCompacted()) {
		return;
	}
	// release all segments owned by compaction partition
	this.compactionMemory.clearAllMemory(availableMemory);
	this.compactionMemory.allocateSegments(1);
	this.compactionMemory.pushDownPages();
	T tempHolder = this.buildSideSerializer.createInstance();
	final int numPartitions = this.partitions.size();
	InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
	MemorySegment[] overflowSegments = partition.overflowSegments;
	long pointer;
	int pointerOffset;
	int bucketOffset;
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	for (int i = 0, bucket = partitionNumber; i < this.buckets.length && bucket < this.numBuckets; i++) {
		MemorySegment segment = this.buckets[i];
		// go over all buckets in the segment belonging to the partition
		for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < this.numBuckets; k += numPartitions, bucket += numPartitions) {
			bucketOffset = k * HASH_BUCKET_SIZE;
			if ((int) segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
				throw new IOException("Accessed wrong bucket! wanted: " + partitionNumber + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
			}
			// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
			int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
			int numInSegment = 0;
			pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
			while (true) {
				while (numInSegment < countInSegment) {
					pointer = segment.getLong(pointerOffset);
					tempHolder = partition.readRecordAt(pointer, tempHolder);
					pointer = this.compactionMemory.appendRecord(tempHolder);
					segment.putLong(pointerOffset, pointer);
					pointerOffset += POINTER_LEN;
					numInSegment++;
				}
				// this segment is done. check if there is another chained bucket
				final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
				if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
					break;
				}
				final int overflowSegNum = (int) (forwardPointer >>> 32);
				segment = overflowSegments[overflowSegNum];
				bucketOffset = (int) forwardPointer;
				countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
				pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
				numInSegment = 0;
			}
			segment = this.buckets[i];
		}
	}
	// swap partition with compaction partition
	this.compactionMemory.setPartitionNumber(partitionNumber);
	this.partitions.add(partitionNumber, compactionMemory);
	this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
	this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
	this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
	this.partitions.get(partitionNumber).setIsCompacted(true);
	//this.partitions.get(partitionNumber).pushDownPages();
	this.compactionMemory = partition;
	this.compactionMemory.resetRecordCounter();
	this.compactionMemory.setPartitionNumber(-1);
	this.compactionMemory.overflowSegments = null;
	this.compactionMemory.numOverflowSegments = 0;
	this.compactionMemory.nextOverflowBucket = 0;
	// try to allocate maximum segment count
	this.compactionMemory.clearAllMemory(this.availableMemory);
	int maxSegmentNumber = this.getMaxPartition();
	this.compactionMemory.allocateSegments(maxSegmentNumber);
	this.compactionMemory.resetRWViews();
	this.compactionMemory.pushDownPages();
}
 
Developer: axbaretto, Project: flink, Lines of code: 81, Source file: CompactingHashTable.java
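
The outer loops rely on the table's round-robin bucket-to-partition assignment: a bucket belongs to partition bucket % numPartitions, so starting at bucket = partitionNumber and striding by numPartitions visits exactly the buckets of the partition being compacted; the check against HEADER_PARTITION_OFFSET guards that invariant.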

Example 8: fillCache

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
/**
 * Utility function that inserts all entries from a bucket and its overflow buckets into the cache.
 * 
 * @return true if the last bucket has not been reached yet
 * @throws IOException
 */
private boolean fillCache() throws IOException {
	if (currentBucketIndex >= table.numBuckets) {
		return false;
	}
	MemorySegment bucket = table.buckets[currentSegmentIndex];
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(currentBucketOffset + HEADER_PARTITION_OFFSET);
	final InMemoryPartition<T> partition = table.partitions.get(partitionNumber);
	final MemorySegment[] overflowSegments = partition.overflowSegments;
	
	int countInSegment = bucket.getInt(currentBucketOffset + HEADER_COUNT_OFFSET);
	int numInSegment = 0;
	int posInSegment = currentBucketOffset + BUCKET_POINTER_START_OFFSET;
	int bucketOffset = currentBucketOffset;

	// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
	while (true) {
		while (numInSegment < countInSegment) {
			long pointer = bucket.getLong(posInSegment);
			posInSegment += POINTER_LEN;
			numInSegment++;
			T target = table.buildSideSerializer.createInstance();
			try {
				target = partition.readRecordAt(pointer, target);
				cache.add(target);
			} catch (IOException e) {
				throw new RuntimeException("Error deserializing record from the Hash Table: " + e.getMessage(), e);
			}
		}
		// this segment is done. check if there is another chained bucket
		final long forwardPointer = bucket.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
		if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
			break;
		}
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		bucket = overflowSegments[overflowSegNum];
		bucketOffset = (int) forwardPointer;
		countInSegment = bucket.getInt(bucketOffset + HEADER_COUNT_OFFSET);
		posInSegment = bucketOffset + BUCKET_POINTER_START_OFFSET;
		numInSegment = 0;
	}
	currentBucketIndex++;
	if (currentBucketIndex % bucketsPerSegment == 0) {
		currentSegmentIndex++;
		currentBucketOffset = 0;
	} else {
		currentBucketOffset += HASH_BUCKET_SIZE;
	}
	return true;
}
 
Developer: axbaretto, Project: flink, Lines of code: 57, Source file: CompactingHashTable.java

Example 9: getMatchFor

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
public T getMatchFor(PT probeSideRecord, T reuse) {
	if (closed) {
		return null;
	}
	final int searchHashCode = MathUtils.jenkinsHash(this.probeTypeComparator.hash(probeSideRecord));
	
	final int posHashCode = searchHashCode % numBuckets;
	
	// get the bucket for the given hash code
	MemorySegment bucket = buckets[posHashCode >> bucketsPerSegmentBits];
	int bucketInSegmentOffset = (posHashCode & bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
	final InMemoryPartition<T> p = partitions.get(partitionNumber);
	final MemorySegment[] overflowSegments = p.overflowSegments;
	
	this.pairComparator.setReference(probeSideRecord);
	
	int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
	int numInSegment = 0;
	int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;

	// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
	while (true) {
		
		while (numInSegment < countInSegment) {
			
			final int thisCode = bucket.getInt(posInSegment);
			posInSegment += HASH_CODE_LEN;
				
			// check if the hash code matches
			if (thisCode == searchHashCode) {
				// get the pointer to the pair
				final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (numInSegment * POINTER_LEN);
				final long pointer = bucket.getLong(pointerOffset);
				numInSegment++;
				
				// deserialize the key to check whether it is really equal, or whether we had only a hash collision
				try {
					reuse = p.readRecordAt(pointer, reuse);
					
					if (this.pairComparator.equalToReference(reuse)) {
						this.partition = p;
						this.bucket = bucket;
						this.pointerOffsetInBucket = pointerOffset;
						return reuse;
					}
				}
				catch (IOException e) {
					throw new RuntimeException("Error deserializing record from the hashtable: " + e.getMessage(), e);
				}
			}
			else {
				numInSegment++;
			}
		}
		
		// this segment is done. check if there is another chained bucket
		final long forwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
		if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
			return null;
		}
		
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		bucket = overflowSegments[overflowSegNum];
		bucketInSegmentOffset = (int) forwardPointer;
		countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
		posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
		numInSegment = 0;
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 73, Source file: CompactingHashTable.java
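
The first lines of the method map a hash code to a (segment, in-segment offset) pair. A minimal sketch of that arithmetic with illustrative values (assuming 128-byte buckets, hence NUM_INTRA_BUCKET_BITS = 7, and 32 KB segments holding 256 buckets; all concrete values here are assumptions):

// illustrative constants: 128-byte buckets in 32 KB segments
static final int NUM_INTRA_BUCKET_BITS = 7;      // log2(128)
static final int BUCKETS_PER_SEGMENT_BITS = 8;   // log2(32768 / 128)
static final int BUCKETS_PER_SEGMENT_MASK = (1 << BUCKETS_PER_SEGMENT_BITS) - 1;

static int bucketSegmentIndex(int posHashCode) {
	return posHashCode >> BUCKETS_PER_SEGMENT_BITS; // which segment holds the bucket
}

static int bucketOffsetInSegment(int posHashCode) {
	// bucket index within the segment, scaled to a byte offset
	return (posHashCode & BUCKETS_PER_SEGMENT_MASK) << NUM_INTRA_BUCKET_BITS;
}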

Example 10: getLimitForSegment

import org.apache.flink.core.memory.MemorySegment; // import the package/class this method depends on
@Override
protected int getLimitForSegment(MemorySegment segment) {
	return segment.getInt(ChannelWriterOutputView.HEAD_BLOCK_LENGTH_OFFSET);
}
 
Developer: axbaretto, Project: flink, Lines of code: 5, Source file: ChannelReaderInputView.java
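
Like Example 6, this override recovers the number of valid bytes in a block from a header stamped by the matching output view; here the length sits at the offset defined by ChannelWriterOutputView.HEAD_BLOCK_LENGTH_OFFSET and is written when a block is flushed.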


Note: The org.apache.flink.core.memory.MemorySegment.getInt method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow each project's License when distributing or using the code, and do not reproduce this article without permission.