本文整理汇总了Java中io.netty.buffer.DrillBuf.release方法的典型用法代码示例。如果您正苦于以下问题:Java DrillBuf.release方法的具体用法?Java DrillBuf.release怎么用?Java DrillBuf.release使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类io.netty.buffer.DrillBuf
的用法示例。
在下文中一共展示了DrillBuf.release方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: run
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Sleeps for 8 seconds, releases every buffer in {@code bufs}, then starts
 * another {@code Alloc} cycle. If interrupted during the sleep, the method
 * returns without starting a new cycle.
 */
@Override
public void run() {
  try {
    Thread.sleep(8000);
    logger.info("Starting release.");
    for (final DrillBuf buf : bufs) {
      buf.release();
    }
    logger.info("Finished release.");
  } catch (InterruptedException e) {
    // Restore the interrupt flag so the owner of this thread can observe
    // the interruption; the original swallowed it entirely.
    Thread.currentThread().interrupt();
    return;
  }
  // start another.
  Alloc alloc = new Alloc(a);
  alloc.start();
}
示例2: replace
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Swaps a managed buffer for a freshly-sized one: drops {@code old} from the
 * managed set, releases it, and hands back a new managed buffer of
 * {@code newSize} bytes.
 *
 * @throws IllegalStateException if {@code old} was not being managed here
 */
public DrillBuf replace(DrillBuf old, int newSize) {
  final Object tracked = managedBuffers.remove(old.memoryAddress());
  if (tracked == null) {
    throw new IllegalStateException("Tried to remove unmanaged buffer.");
  }
  old.release();
  return getManagedBuffer(newSize);
}
示例3: replace
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Replaces a managed buffer with a new one of {@code newSize} bytes,
 * releasing exactly one reference on the old buffer.
 *
 * @throws IllegalStateException if {@code old} was not being managed here
 */
public DrillBuf replace(DrillBuf old, int newSize) {
  final boolean wasManaged = managedBuffers.remove(old.memoryAddress()) != null;
  if (!wasManaged) {
    throw new IllegalStateException("Tried to remove unmanaged buffer.");
  }
  old.release(1); // drop exactly one reference
  return getManagedBuffer(newSize);
}
示例4: readFromStream
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Reads from an InputStream and parses a RecordBatchDef. From this, we construct a SelectionVector2 if it exists
 * and construct the vectors and add them to a vector container.
 *
 * @param input the InputStream to read from
 * @throws IOException if reading the batch definition or vector data fails
 */
@Override
public void readFromStream(InputStream input) throws IOException {
final VectorContainer container = new VectorContainer();
// Batch metadata (schema, record count, SV2 flag) is written length-delimited
// ahead of the raw buffer bytes.
final UserBitShared.RecordBatchDef batchDef = UserBitShared.RecordBatchDef.parseDelimitedFrom(input);
recordCount = batchDef.getRecordCount();
if (batchDef.hasCarriesTwoByteSelectionVector() && batchDef.getCarriesTwoByteSelectionVector()) {
if (sv2 == null) {
sv2 = new SelectionVector2(allocator);
}
// SV2 entries are fixed-size records stored immediately after the definition.
sv2.allocateNew(recordCount * SelectionVector2.RECORD_SIZE);
sv2.getBuffer().setBytes(0, input, recordCount * SelectionVector2.RECORD_SIZE);
svMode = BatchSchema.SelectionVectorMode.TWO_BYTE;
}
final List<ValueVector> vectorList = Lists.newArrayList();
final List<SerializedField> fieldList = batchDef.getFieldList();
// Reconstruct one value vector per serialized field, in stream order.
for (SerializedField metaData : fieldList) {
final int dataLength = metaData.getBufferLength();
final MaterializedField field = MaterializedField.create(metaData);
final DrillBuf buf = allocator.buffer(dataLength);
final ValueVector vector;
try {
buf.writeBytes(input, dataLength);
vector = TypeHelper.getNewVector(field, allocator);
vector.load(metaData, buf);
} finally {
// Release our reference to the scratch buffer. NOTE(review): this assumes
// vector.load() retains the buffer, so releasing here transfers ownership
// to the vector rather than freeing live data -- confirm against load().
buf.release();
}
// Only reached when the try block completed, so `vector` is definitely assigned.
vectorList.add(vector);
}
container.addCollection(vectorList);
container.buildSchema(svMode);
container.setRecordCount(recordCount);
va = container;
}
示例5: clear
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Releases every tracked buffer. Idempotent: subsequent calls are no-ops.
 */
public void clear() {
  if (!cleared) {
    for (final DrillBuf buf : buffers) {
      buf.release();
    }
    cleared = true;
  }
}
示例6: close
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
@Override
public void close() {
  // Don't leak unused pre-allocated memory.
  if (svAllocatorUsed) {
    return;
  }
  final DrillBuf preallocated = svAllocator.getAllocation();
  if (preallocated != null) {
    preallocated.release();
  }
}
示例7: close
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
@Override
public void close() {
  // If the pre-allocated selection-vector buffer was never handed out,
  // release it here so it is not leaked.
  if (!svAllocatorUsed) {
    final DrillBuf unused = svAllocator.getAllocation();
    if (unused != null) {
      unused.release();
    }
  }
}
示例8: testHadooopVInt
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Round-trips boundary values through HadoopWritables' variable-length
 * long/int encodings and verifies each reads back unchanged.
 */
@Test
public void testHadooopVInt() throws Exception {
  final int start = 0;
  final int end = 9; // max bytes a VLong can occupy
  final DrillBuf buffer = getAllocator().buffer(end);
  try {
    // VLong: zero plus both extremes.
    for (final long expected : new long[] {0L, Long.MAX_VALUE, Long.MIN_VALUE}) {
      buffer.clear();
      HadoopWritables.writeVLong(buffer, start, end, expected);
      assertEquals(HadoopWritables.readVLong(buffer, start, end), expected);
    }
    // VInt: zero plus both extremes.
    for (final int expected : new int[] {0, Integer.MAX_VALUE, Integer.MIN_VALUE}) {
      buffer.clear();
      HadoopWritables.writeVInt(buffer, start, end, expected);
      assertEquals(HadoopWritables.readVInt(buffer, start, end), expected);
    }
  } finally {
    // The original released only on success, leaking the buffer whenever
    // an assertion failed; always release.
    buffer.release();
  }
}
示例9: writeToStream
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Serializes the VectorAccessible va and writes it to an output stream.
 *
 * @param output the OutputStream to write to
 * @throws IOException if writing the batch definition or buffer data fails
 */
@Override
public void writeToStream(OutputStream output) throws IOException {
  Preconditions.checkNotNull(output);
  final Timer.Context timerContext = metrics.timer(WRITER_TIMER).time();
  final DrillBuf[] incomingBuffers = batch.getBuffers();
  final UserBitShared.RecordBatchDef batchDef = batch.getDef();
  /* DrillBuf associated with the selection vector */
  DrillBuf svBuf = null;
  Integer svCount = null;
  if (svMode == BatchSchema.SelectionVectorMode.TWO_BYTE) {
    svCount = sv2.getCount();
    svBuf = sv2.getBuffer(); // this calls retain() internally
  }
  try {
    /* Write the metadata to the file */
    batchDef.writeDelimitedTo(output);
    /* If we have a selection vector, dump it to file first */
    if (svBuf != null) {
      svBuf.getBytes(0, output, svBuf.readableBytes());
      sv2.setBuffer(svBuf);
      svBuf.release(); // sv2 now owns the buffer
      sv2.setRecordCount(svCount);
    }
    /* Dump the array of ByteBuf's associated with the value vectors */
    for (DrillBuf buf : incomingBuffers) {
      /* dump the buffer into the OutputStream */
      int bufLength = buf.readableBytes();
      buf.getBytes(0, output, bufLength);
    }
    output.flush();
  } finally {
    // Stop the timer and release buffers on every exit path. The original
    // only stopped the timer on success, and re-wrapped the declared
    // IOException in a RuntimeException; since this method already declares
    // `throws IOException`, let it propagate unchanged.
    timerContext.stop();
    clear();
  }
}
示例10: getBigDecimalFromDense
import io.netty.buffer.DrillBuf; //导入方法依赖的package包/类
/**
 * Converts a dense decimal representation stored in {@code data} into a
 * {@link BigDecimal}, by first widening it into an intermediate (sparse-like)
 * layout that carries one extra integer, then parsing that layout.
 *
 * @param data buffer holding the dense representation
 * @param startIndex byte offset of the value within {@code data}
 * @param nDecimalDigits number of integer "digits" in the dense form
 * @param scale decimal scale of the result
 * @param maxPrecision either 38 or 28; selects the bit-packing layout
 * @param width width in bytes of the dense value
 * @throws UnsupportedOperationException for any other {@code maxPrecision}
 */
public static BigDecimal getBigDecimalFromDense(DrillBuf data, int startIndex, int nDecimalDigits, int scale, int maxPrecision, int width) {
  /* This method converts the dense representation to
   * an intermediate representation. The intermediate
   * representation has one more integer than the dense
   * representation.
   */
  byte[] intermediateBytes = new byte[((nDecimalDigits + 1) * integerSize)];
  // Start storing from the least significant byte of the first integer
  int intermediateIndex = 3;
  int[] mask = {0x03, 0x0F, 0x3F, 0xFF};
  int[] reverseMask = {0xFC, 0xF0, 0xC0, 0x00};
  int maskIndex;
  int shiftOrder;
  byte shiftBits;
  // TODO: Some of the logic here is common with casting from Dense to Sparse types, factor out common code
  if (maxPrecision == 38) {
    maskIndex = 0;
    shiftOrder = 6;
    shiftBits = 0x00;
    // Top bit of the first byte is the sign; keep the remaining 7 bits.
    intermediateBytes[intermediateIndex++] = (byte) (data.getByte(startIndex) & 0x7F);
  } else if (maxPrecision == 28) {
    maskIndex = 1;
    shiftOrder = 4;
    shiftBits = (byte) ((data.getByte(startIndex) & 0x03) << shiftOrder);
    intermediateBytes[intermediateIndex++] = (byte) (((data.getByte(startIndex) & 0x3C) & 0xFF) >>> 2);
  } else {
    throw new UnsupportedOperationException("Dense types with max precision 38 and 28 are only supported");
  }
  int inputIndex = 1;
  boolean sign = false;
  if ((data.getByte(startIndex) & 0x80) != 0) {
    sign = true;
  }
  // Shift the packed dense bytes into byte-aligned intermediate bytes,
  // carrying `shiftBits` from one byte to the next.
  while (inputIndex < width) {
    intermediateBytes[intermediateIndex] = (byte) ((shiftBits) | (((data.getByte(startIndex + inputIndex) & reverseMask[maskIndex]) & 0xFF) >>> (8 - shiftOrder)));
    shiftBits = (byte) ((data.getByte(startIndex + inputIndex) & mask[maskIndex]) << shiftOrder);
    inputIndex++;
    intermediateIndex++;
    // At each integer boundary the packing loses two bits, so the mask and
    // shift order advance.
    if (((inputIndex - 1) % integerSize) == 0) {
      shiftBits = (byte) ((shiftBits & 0xFF) >>> 2);
      maskIndex++;
      shiftOrder -= 2;
    }
  }
  /* copy the last byte */
  intermediateBytes[intermediateIndex] = shiftBits;
  if (sign) {
    intermediateBytes[0] = (byte) (intermediateBytes[0] | 0x80);
  }
  DrillBuf intermediate = data.getAllocator().buffer(intermediateBytes.length);
  try {
    intermediate.setBytes(0, intermediateBytes);
    return getBigDecimalFromIntermediate(intermediate, 0, nDecimalDigits + 1, scale);
  } finally {
    // Release the scratch buffer on every path; the original leaked it if
    // setBytes or getBigDecimalFromIntermediate threw.
    intermediate.release();
  }
}