

Java MemorySegment.putInt Method Code Examples

This article collects representative usage examples of the Java method org.apache.flink.core.memory.MemorySegment.putInt. If you are unsure what MemorySegment.putInt does, how to call it, or what it looks like in practice, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.flink.core.memory.MemorySegment.


The sections below present 6 code examples of the MemorySegment.putInt method, ordered by popularity by default.
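
Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. It only uses calls that also appear in the examples below (MemorySegmentFactory.allocateUnpooledSegment, putInt, getInt); the segment size and the values written are arbitrary choices for illustration.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class PutIntSketch {
	public static void main(String[] args) {
		// allocate a small unpooled segment; 64 bytes is an arbitrary choice
		MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(64);

		// putInt writes a 4-byte int at an absolute byte offset within the segment
		for (int offset = 0; offset < segment.size(); offset += 4) {
			segment.putInt(offset, offset / 4);
		}

		// getInt reads the value back from the same offset
		System.out.println(segment.getInt(8)); // prints 2
	}
}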

Example 1: nextSegment

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
@Override
protected MemorySegment nextSegment(MemorySegment current, int positionInCurrent) throws IOException {
	current.putInt(0, positionInCurrent);

	// check if we keep the segment in memory, or if we spill it
	if (emptyBuffers.size() > numSegmentsSpillingThreshold) {
		// keep buffer in memory
		fullBuffers.addLast(current);
	} else {
		// spill all buffers up to now
		// check, whether we have a channel already
		if (currentWriter == null) {
			currentWriter = ioManager.createBlockChannelWriter(channelEnumerator.next(), emptyBuffers);
		}

		// spill all elements gathered up to now
		numBuffersSpilled += fullBuffers.size();
		while (fullBuffers.size() > 0) {
			currentWriter.writeBlock(fullBuffers.removeFirst());
		}
		currentWriter.writeBlock(current);
		numBuffersSpilled++;
	}

	try {
		return emptyBuffers.take();
	} catch (InterruptedException iex) {
		throw new RuntimeException("Spilling Fifo Queue was interrupted while waiting for next buffer.");
	}
}
 
Developer ID: axbaretto, Project: flink, Lines: 31, Source: SerializedUpdateBuffer.java
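
Here putInt stamps the final write position into the first four bytes of the segment before the segment is either kept in memory or spilled to disk. A consumer of such a segment would recover that position with the matching getInt call; a two-line sketch, assuming the 4-byte position field is the entire header:

// read back the write position stored at offset 0 by nextSegment
int positionInSegment = segment.getInt(0);
// the buffered records then occupy the bytes between the header and positionInSegment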

Example 2: initTable

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
private void initTable(int numBuckets, byte numPartitions) {
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ( (numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
	final MemorySegment[] table = new MemorySegment[numSegs];
	
	// go over all segments that are part of the table
	for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
		final MemorySegment seg = getNextBuffer();
		
		// go over all buckets in the segment
		for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
			final int bucketOffset = k * HASH_BUCKET_SIZE;	
			
			// compute the partition that the bucket corresponds to
			final byte partition = assignPartition(bucket, numPartitions);
			
			// initialize the header fields
			seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
			seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
			seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
		}
		
		table[i] = seg;
	}
	this.buckets = table;
	this.numBuckets = numBuckets;
}
 
Developer ID: axbaretto, Project: flink, Lines: 28, Source: CompactingHashTable.java
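
Each bucket header is initialized with three fields: the partition byte, a zeroed entry count, and a forward pointer set to the NOT_SET marker. Reading the header back mirrors the writes one-to-one, as in the sketch below; the offset constants are reused from the snippet, and their numeric values are internal to CompactingHashTable:

// symmetric reads of the three header fields written in initTable
byte partition = seg.get(bucketOffset + HEADER_PARTITION_OFFSET);  // partition this bucket belongs to
int count = seg.getInt(bucketOffset + HEADER_COUNT_OFFSET);        // number of entries currently in the bucket
long forward = seg.getLong(bucketOffset + HEADER_FORWARD_OFFSET);  // overflow-bucket pointer, or BUCKET_FORWARD_POINTER_NOT_SET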

Example 3: writeSegment

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
private void writeSegment(MemorySegment segment, int writePosition, boolean lastSegment) throws IOException
{
	segment.putShort(0, HEADER_MAGIC_NUMBER);
	segment.putShort(HEADER_FLAGS_OFFSET, lastSegment ? FLAG_LAST_BLOCK : 0);
	segment.putInt(HEAD_BLOCK_LENGTH_OFFSET, writePosition);
	
	this.writer.writeBlock(segment);
	this.bytesBeforeSegment += writePosition - HEADER_LENGTH;
}
 
Developer ID: axbaretto, Project: flink, Lines: 10, Source: ChannelWriterOutputView.java
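
The block header written here consists of a 2-byte magic number, a 2-byte flags field, and a 4-byte block length recording how far the block was filled. The reading side would validate and consume it along these lines; a sketch that reuses the constant names from the snippet and assumes the reader has the same constants in scope:

short magic = segment.getShort(0);                          // must equal HEADER_MAGIC_NUMBER
short flags = segment.getShort(HEADER_FLAGS_OFFSET);        // FLAG_LAST_BLOCK marks the final block of the channel
int blockLength = segment.getInt(HEAD_BLOCK_LENGTH_OFFSET); // payload ends at this position
boolean isLastBlock = (flags & FLAG_LAST_BLOCK) != 0;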

Example 4: fillBufferWithAscendingNumbers

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
static int fillBufferWithAscendingNumbers(Buffer buffer, int currentNumber, int size) {
	checkArgument(size % 4 == 0);

	MemorySegment segment = buffer.getMemorySegment();

	for (int i = 0; i < size; i += 4) {
		segment.putInt(i, currentNumber++);
	}
	buffer.setSize(size);

	return currentNumber;
}
 
Developer ID: axbaretto, Project: flink, Lines: 13, Source: BufferFileWriterReaderTest.java
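
The natural counterpart when reading such a buffer back is to check the same 4-byte positions with getInt. A sketch of such a validator (the method name is illustrative, not from the project):

static int verifyBufferFilledWithAscendingNumbers(Buffer buffer, int currentNumber, int size) {
	MemorySegment segment = buffer.getMemorySegment();
	for (int i = 0; i < size; i += 4) {
		if (segment.getInt(i) != currentNumber++) {
			throw new IllegalStateException("Read buffer contains invalid data.");
		}
	}
	return currentNumber;
}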

Example 5: channelReadWriteOneSegment

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
@Test
public void channelReadWriteOneSegment() {
	final int NUM_IOS = 1111;
	
	try {
		final FileIOChannel.ID channelID = this.ioManager.createChannel();
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channelID);
		
		MemorySegment memSeg = MemorySegmentFactory.allocateUnpooledSegment(32 * 1024);
		
		for (int i = 0; i < NUM_IOS; i++) {
			for (int pos = 0; pos < memSeg.size(); pos += 4) {
				memSeg.putInt(pos, i);
			}
			
			writer.writeBlock(memSeg);
			memSeg = writer.getNextReturnedBlock();
		}
		
		writer.close();
		
		final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channelID);
		for (int i = 0; i < NUM_IOS; i++) {
			reader.readBlock(memSeg);
			memSeg = reader.getNextReturnedBlock();
			
			for (int pos = 0; pos < memSeg.size(); pos += 4) {
				if (memSeg.getInt(pos) != i) {
					fail("Read memory segment contains invalid data.");
				}
			}
		}
		
		reader.closeAndDelete();
	}
	catch (Exception ex) {
		ex.printStackTrace();
		fail("Test encountered an exception: " + ex.getMessage());
	}
}
 
Developer ID: axbaretto, Project: flink, Lines: 41, Source: IOManagerAsyncTest.java

Example 6: insertBucketEntryFromSearch

import org.apache.flink.core.memory.MemorySegment; // import the package/class the method depends on
/**
 * IMPORTANT!!! We pass only the partition number, because we must make sure we get a fresh
 * partition reference. The partition reference used during search for the key may have become
 * invalid during the compaction.
 */
private void insertBucketEntryFromSearch(MemorySegment originalBucket, MemorySegment currentBucket,
											int originalBucketOffset, int currentBucketOffset,
											int countInCurrentBucket, long originalForwardPointer,
											int hashCode, long pointer, int partitionNumber) throws IOException {
	boolean checkForResize = false;
	if (countInCurrentBucket < NUM_ENTRIES_PER_BUCKET) {
		// we are good in our current bucket, put the values
		currentBucket.putInt(currentBucketOffset + BUCKET_HEADER_LENGTH + (countInCurrentBucket * HASH_CODE_LEN), hashCode); // hash code
		currentBucket.putLong(currentBucketOffset + BUCKET_POINTER_START_OFFSET + (countInCurrentBucket * POINTER_LEN), pointer); // pointer
		currentBucket.putInt(currentBucketOffset + HEADER_COUNT_OFFSET, countInCurrentBucket + 1); // update count
	}
	else {
		// we go to a new overflow bucket
		final InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
		MemorySegment overflowSeg;
		final int overflowSegmentNum;
		final int overflowBucketOffset;
		
		// first, see if there is space for an overflow bucket remaining in the last overflow segment
		if (partition.nextOverflowBucket == 0) {
			// no space left in last bucket, or no bucket yet, so create an overflow segment
			overflowSeg = getNextBuffer();
			overflowBucketOffset = 0;
			overflowSegmentNum = partition.numOverflowSegments;
			
			// add the new overflow segment
			if (partition.overflowSegments.length <= partition.numOverflowSegments) {
				MemorySegment[] newSegsArray = new MemorySegment[partition.overflowSegments.length * 2];
				System.arraycopy(partition.overflowSegments, 0, newSegsArray, 0, partition.overflowSegments.length);
				partition.overflowSegments = newSegsArray;
			}
			partition.overflowSegments[partition.numOverflowSegments] = overflowSeg;
			partition.numOverflowSegments++;
			checkForResize = true;
		}
		else {
			// there is space in the last overflow segment
			overflowSegmentNum = partition.numOverflowSegments - 1;
			overflowSeg = partition.overflowSegments[overflowSegmentNum];
			overflowBucketOffset = partition.nextOverflowBucket << NUM_INTRA_BUCKET_BITS;
		}
		
		// next overflow bucket is one ahead. if the segment is full, the next will be at the beginning
		// of a new segment
		partition.nextOverflowBucket = (partition.nextOverflowBucket == this.bucketsPerSegmentMask ? 0 : partition.nextOverflowBucket + 1);
		
		// insert the new overflow bucket in the chain of buckets
		
		// 1) set the old forward pointer
		// 2) let the bucket in the main table point to this one
		overflowSeg.putLong(overflowBucketOffset + HEADER_FORWARD_OFFSET, originalForwardPointer);
		final long pointerToNewBucket = (((long) overflowSegmentNum) << 32) | ((long) overflowBucketOffset);
		originalBucket.putLong(originalBucketOffset + HEADER_FORWARD_OFFSET, pointerToNewBucket);
		
		// finally, insert the values into the overflow buckets
		overflowSeg.putInt(overflowBucketOffset + BUCKET_HEADER_LENGTH, hashCode);	// hash code
		overflowSeg.putLong(overflowBucketOffset + BUCKET_POINTER_START_OFFSET, pointer); // pointer
		
		// set the count to one
		overflowSeg.putInt(overflowBucketOffset + HEADER_COUNT_OFFSET, 1);
		if(checkForResize && !this.isResizing) {
			// check if we should resize buckets
			if(this.buckets.length <= getOverflowSegmentCount()) {
				resizeHashTable();
			}
		}
	}
}
 
Developer ID: axbaretto, Project: flink, Lines: 74, Source: CompactingHashTable.java
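
The forward pointer built in this example packs two ints into one long: the overflow segment's index in the high 32 bits and the overflow bucket's byte offset in the low 32 bits. Decoding reverses the shift, as in this sketch:

// encode, exactly as in the example above
long pointerToNewBucket = (((long) overflowSegmentNum) << 32) | ((long) overflowBucketOffset);

// decode the two halves back out of the long
int segmentNum = (int) (pointerToNewBucket >>> 32); // high 32 bits: index into partition.overflowSegments
int bucketOffsetInSeg = (int) pointerToNewBucket;   // low 32 bits: byte offset within that segment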


Note: The org.apache.flink.core.memory.MemorySegment.putInt method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; the source code remains under its original authors' copyright, and distribution or use should follow the corresponding project's License. Please do not republish without permission.