This article collects typical usage examples of the MemorySegment.put method from the Java class org.apache.flink.core.memory.MemorySegment. If you are wondering what MemorySegment.put does, how to call it, or what real usages look like, the curated samples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.core.memory.MemorySegment.
Listed below are 15 code examples of MemorySegment.put, ordered roughly by popularity.
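Before the examples, here is a minimal, self-contained sketch of how MemorySegment.put is typically called. The class name, segment size, offsets, and payload bytes are illustrative values chosen for this sketch and are not taken from any of the examples below.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class MemorySegmentPutSketch {

    public static void main(String[] args) {
        // allocate an unpooled segment backed by a fresh heap byte array
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(4096);

        // put(int index, byte b): write a single byte at an absolute position
        segment.put(0, (byte) 42);

        // put(int index, byte[] src, int offset, int length): copy a slice of a byte array
        byte[] payload = {1, 2, 3, 4};
        segment.put(1, payload, 0, payload.length);

        // read single bytes back to verify the writes
        System.out.println(segment.get(0)); // 42
        System.out.println(segment.get(4)); // 4
    }
}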
Example 1: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void putNormalizedKey(IntList record, MemorySegment target, int offset, int len) {
    final int value = record.getKey() - Integer.MIN_VALUE;
    if (len == 4) {
        target.putIntBigEndian(offset, value);
    }
    else if (len <= 0) {
    }
    else if (len < 4) {
        for (int i = 0; len > 0; len--, i++) {
            target.put(offset + i, (byte) ((value >>> ((3 - i) << 3)) & 0xff));
        }
    }
    else {
        target.putIntBigEndian(offset, value);
        for (int i = 4; i < len; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 2: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void putNormalizedKey(Long lValue, MemorySegment target, int offset, int numBytes) {
    long value = lValue.longValue() - Long.MIN_VALUE;
    // see IntValue for an explanation of the logic
    if (numBytes == 8) {
        // default case, full normalized key
        target.putLongBigEndian(offset, value);
    }
    else if (numBytes <= 0) {
    }
    else if (numBytes < 8) {
        for (int i = 0; numBytes > 0; numBytes--, i++) {
            target.put(offset + i, (byte) (value >>> ((7 - i) << 3)));
        }
    }
    else {
        target.putLongBigEndian(offset, value);
        for (int i = 8; i < numBytes; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 3: createBuffer
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
private static BufferOrEvent createBuffer(int channel) {
    final int size = sizeCounter++;
    byte[] bytes = new byte[size];
    RND.nextBytes(bytes);
    MemorySegment memory = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
    memory.put(0, bytes);
    Buffer buf = new NetworkBuffer(memory, FreeingBufferRecycler.INSTANCE);
    buf.setSize(size);
    // retain an additional time so it does not get disposed after being read by the input gate
    buf.retainBuffer();
    return new BufferOrEvent(buf, channel);
}
Example 4: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void putNormalizedKey(T iValue, MemorySegment target, int offset, int numBytes) {
    int value = iValue.ordinal() - Integer.MIN_VALUE;
    // see IntValue for an explanation of the logic
    if (numBytes == 4) {
        // default case, full normalized key
        target.putIntBigEndian(offset, value);
    }
    else if (numBytes <= 0) {
    }
    else if (numBytes < 4) {
        for (int i = 0; numBytes > 0; numBytes--, i++) {
            target.put(offset + i, (byte) (value >>> ((3 - i) << 3)));
        }
    }
    else {
        target.putIntBigEndian(offset, value);
        for (int i = 4; i < numBytes; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 5: putNormalizedKeyDate
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
public static void putNormalizedKeyDate(Date record, MemorySegment target, int offset, int numBytes) {
    final long value = record.getTime() - Long.MIN_VALUE;
    // see IntValue for an explanation of the logic
    if (numBytes == 8) {
        // default case, full normalized key
        target.putLongBigEndian(offset, value);
    }
    else if (numBytes < 8) {
        for (int i = 0; numBytes > 0; numBytes--, i++) {
            target.put(offset + i, (byte) (value >>> ((7 - i) << 3)));
        }
    }
    else {
        target.putLongBigEndian(offset, value);
        for (int i = 8; i < numBytes; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 6: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void putNormalizedKey(Integer iValue, MemorySegment target, int offset, int numBytes) {
    int value = iValue.intValue() - Integer.MIN_VALUE;
    // see IntValue for an explanation of the logic
    if (numBytes == 4) {
        // default case, full normalized key
        target.putIntBigEndian(offset, value);
    }
    else if (numBytes <= 0) {
    }
    else if (numBytes < 4) {
        for (int i = 0; numBytes > 0; numBytes--, i++) {
            target.put(offset + i, (byte) (value >>> ((3 - i) << 3)));
        }
    }
    else {
        target.putIntBigEndian(offset, value);
        for (int i = 4; i < numBytes; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 7: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void putNormalizedKey(Character value, MemorySegment target, int offset, int numBytes) {
    // note that the char is an unsigned data type in java and consequently needs
    // no code that transforms the signed representation to an offset representation
    // that is equivalent to unsigned, when compared byte by byte
    if (numBytes == 2) {
        // default case, full normalized key
        target.put(offset, (byte) ((value >>> 8) & 0xff));
        target.put(offset + 1, (byte) (value & 0xff));
    }
    else if (numBytes <= 0) {
    }
    else if (numBytes == 1) {
        target.put(offset, (byte) ((value >>> 8) & 0xff));
    }
    else {
        target.put(offset, (byte) ((value >>> 8) & 0xff));
        target.put(offset + 1, (byte) (value & 0xff));
        for (int i = 2; i < numBytes; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 8: copyNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void copyNormalizedKey(MemorySegment target, int offset, int len) {
    // note that the char is an unsigned data type in java and consequently needs
    // no code that transforms the signed representation to an offset representation
    // that is equivalent to unsigned, when compared byte by byte
    if (len == 2) {
        // default case, full normalized key
        target.put(offset, (byte) ((value >>> 8) & 0xff));
        target.put(offset + 1, (byte) (value & 0xff));
    }
    else if (len <= 0) {
    }
    else if (len == 1) {
        target.put(offset, (byte) ((value >>> 8) & 0xff));
    }
    else {
        target.put(offset, (byte) ((value >>> 8) & 0xff));
        target.put(offset + 1, (byte) (value & 0xff));
        for (int i = 2; i < len; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 9: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void putNormalizedKey(IntPair record, MemorySegment target, int offset, int len) {
    // see IntValue for a documentation of the logic
    final int value = record.getKey() - Integer.MIN_VALUE;
    if (len == 4) {
        target.putIntBigEndian(offset, value);
    } else if (len <= 0) {
    } else if (len < 4) {
        for (int i = 0; len > 0; len--, i++) {
            target.put(offset + i, (byte) ((value >>> ((3 - i) << 3)) & 0xff));
        }
    } else {
        target.putIntBigEndian(offset, value);
        for (int i = 4; i < len; i++) {
            target.put(offset + i, (byte) 0);
        }
    }
}
Example 10: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void putNormalizedKey(byte[] record, MemorySegment target, int offset, int numBytes) {
    final int limit = offset + numBytes;
    target.put(offset, record, 0, Math.min(numBytes, record.length));
    offset += record.length;
    while (offset < limit) {
        target.put(offset++, (byte) 0);
    }
}
Example 11: generateRandomBuffer
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
private static BufferOrEvent generateRandomBuffer(int size, int channelIndex) {
    MemorySegment seg = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
    for (int i = 0; i < size; i++) {
        seg.put(i, (byte) i);
    }
    Buffer buf = new NetworkBuffer(seg, FreeingBufferRecycler.INSTANCE);
    buf.setSize(size);
    return new BufferOrEvent(buf, channelIndex);
}
Example 12: putNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
/**
 * Adds a normalized key containing a normalized order of magnitude of the given record.
 * 2 bits determine the sign (negative, zero, positive), 33 bits determine the magnitude.
 * This method adds at most 5 bytes that contain information.
 */
@Override
public void putNormalizedKey(BigDecimal record, MemorySegment target, int offset, int len) {
    final long signum = record.signum();
    // order of magnitude
    // smallest:
    //   scale = Integer.MAX, precision = 1 => SMALLEST_MAGNITUDE
    // largest:
    //   scale = Integer.MIN, precision = Integer.MAX => LARGEST_MAGNITUDE
    final long mag = ((long) record.scale()) - ((long) record.precision()) + 1;
    // normalize value range: from 0 to (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE)
    final long normMag = -1L * LARGEST_MAGNITUDE + mag;
    // normalize value range dependent on sign:
    //   0 to (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE)
    //   OR (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE) to 0
    // --> uses at most 33 bit (5 least-significant bytes)
    long signNormMag = signum < 0 ? normMag : (SMALLEST_MAGNITUDE + -1L * LARGEST_MAGNITUDE - normMag);
    // zero has no magnitude
    // set 34th bit to flag zero
    if (signum == 0) {
        signNormMag = 0L;
        signNormMag |= (1L << 34);
    }
    // set 35th bit to flag positive sign
    else if (signum > 0) {
        signNormMag |= (1L << 35);
    }
    // add 5 least-significant bytes that contain value to target
    for (int i = 0; i < 5 && len > 0; i++, len--) {
        final byte b = (byte) (signNormMag >>> (8 * (4 - i)));
        target.put(offset++, b);
    }
}
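To make the magnitude computation above concrete, here is a small, hypothetical sanity check (the values are chosen for illustration and are not part of the original example):

// new BigDecimal("123.45") has scale() == 2 and precision() == 5,
// so mag = 2 - 5 + 1 = -2; the ten-times-larger 1234.5 (scale 1, precision 5)
// yields mag = -3, i.e. larger absolute values map to smaller mag values,
// which the sign handling above turns into a byte-wise comparable ordering.
BigDecimal d = new BigDecimal("123.45");
long mag = ((long) d.scale()) - ((long) d.precision()) + 1; // -2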
Example 13: copyNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void copyNormalizedKey(MemorySegment target, int offset, int len) {
    // cache variables on stack, avoid repeated dereferencing of "this"
    final char[] chars = this.value;
    final int limit = offset + len;
    final int end = this.len;
    int pos = 0;
    while (pos < end && offset < limit) {
        char c = chars[pos++];
        if (c < HIGH_BIT) {
            target.put(offset++, (byte) c);
        }
        else if (c < HIGH_BIT2) {
            target.put(offset++, (byte) ((c >>> 7) | HIGH_BIT));
            if (offset < limit) {
                target.put(offset++, (byte) c);
            }
        }
        else {
            target.put(offset++, (byte) ((c >>> 10) | HIGH_BIT2_MASK));
            if (offset < limit) {
                target.put(offset++, (byte) (c >>> 2));
            }
            if (offset < limit) {
                target.put(offset++, (byte) c);
            }
        }
    }
    while (offset < limit) {
        target.put(offset++, (byte) 0);
    }
}
Example 14: copyNormalizedKey
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
@Override
public void copyNormalizedKey(MemorySegment target, int offset, int len) {
    if (len > 0) {
        target.put(offset, (byte) (this.value ? 1 : 0));
        for (offset = offset + 1; len > 1; len--) {
            target.put(offset++, (byte) 0);
        }
    }
}
Example 15: initTable
import org.apache.flink.core.memory.MemorySegment; // import needed for the method
private void initTable(int numBuckets, byte numPartitions) {
    final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
    final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ((numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
    final MemorySegment[] table = new MemorySegment[numSegs];
    // go over all segments that are part of the table
    for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
        final MemorySegment seg = getNextBuffer();
        // go over all buckets in the segment
        for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
            final int bucketOffset = k * HASH_BUCKET_SIZE;
            // compute the partition that the bucket corresponds to
            final byte partition = assignPartition(bucket, numPartitions);
            // initialize the header fields
            seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
            seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
            seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
        }
        table[i] = seg;
    }
    this.buckets = table;
    this.numBuckets = numBuckets;
}