本文整理汇总了Java中org.apache.drill.exec.vector.AllocationHelper.allocateNew方法的典型用法代码示例。如果您正苦于以下问题:Java AllocationHelper.allocateNew方法的具体用法?Java AllocationHelper.allocateNew怎么用?Java AllocationHelper.allocateNew使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.drill.exec.vector.AllocationHelper
的用法示例。
在下文中一共展示了AllocationHelper.allocateNew方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: doAlloc
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Allocates output buffers for a new batch: every vector in
 * {@code allocationVectors} is sized to the incoming record count, and each
 * complex writer (if any) allocates its own storage.
 *
 * @return always {@code true}; allocation failures surface as runtime exceptions
 */
private boolean doAlloc() {
  // Size each transfer/copy target vector for the incoming batch.
  for (final ValueVector vector : this.allocationVectors) {
    AllocationHelper.allocateNew(vector, incoming.getRecordCount());
  }
  // Complex writers manage their own vectors; allocate those too when present.
  if (complexWriters != null) {
    for (final ComplexWriter complexWriter : complexWriters) {
      complexWriter.allocate();
    }
  }
  return true;
}
示例2: doAlloc
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Allocates output buffers for a new batch of the given size: every vector in
 * {@code allocationVectors} is sized for {@code recordCount} records, and each
 * complex writer (if any) allocates its own storage.
 *
 * @param recordCount number of records the batch must hold
 * @return always {@code true}; allocation failures surface as runtime exceptions
 */
private boolean doAlloc(int recordCount) {
  // Size each transfer/copy target vector for the requested record count.
  for (final ValueVector vector : this.allocationVectors) {
    AllocationHelper.allocateNew(vector, recordCount);
  }
  // Complex writers manage their own vectors; allocate those too when present.
  if (complexWriters != null) {
    for (final ComplexWriter complexWriter : complexWriters) {
      complexWriter.allocate();
    }
  }
  return true;
}
示例3: doAlloc
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Attempts to allocate every vector in {@code allocationVectors} for the
 * current batch's record count.
 *
 * @return {@code true} when all allocations succeed, {@code false} as soon as
 *         one fails with an out-of-memory condition
 */
private boolean doAlloc() {
  for (final ValueVector vector : allocationVectors) {
    try {
      AllocationHelper.allocateNew(vector, current.getRecordCount());
    } catch (OutOfMemoryRuntimeException oom) {
      // Signal the caller that memory is exhausted instead of propagating.
      return false;
    }
  }
  return true;
}
示例4: constructSpecialBatch
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Builds the single-row output batch for a straight aggregate (no GROUP BY)
 * over empty input. Nullable vectors are left untouched so they read as NULL;
 * required vectors — which only arise for count() — are zeroed explicitly,
 * since buffers are not cleared at allocation time.
 *
 * @throws DrillRuntimeException if a required vector is not fixed-width,
 *         which would mean uninitialized variable-length data is exposed
 */
private void constructSpecialBatch() {
  int exprIndex = 0;
  for (final VectorWrapper<?> wrapper : container) {
    final ValueVector vector = wrapper.getValueVector();
    AllocationHelper.allocateNew(vector, SPECIAL_BATCH_COUNT);
    vector.getMutator().setValueCount(SPECIAL_BATCH_COUNT);
    if (vector.getField().getType().getMode() == TypeProtos.DataMode.REQUIRED) {
      // Only count() produces a required vector here, and its output is a
      // fixed-width BigIntVector. Anything else is a bug: the record count is
      // already 1 but the buffer was never cleared.
      if (!(vector instanceof FixedWidthVector)) {
        throw new DrillRuntimeException("FixedWidth vectors is the expected output vector type. " +
            "Corresponding expression: " + popConfig.getExprs()[exprIndex].toString());
      }
      ((FixedWidthVector) vector).zeroVector();
    }
    exprIndex++;
  }
  container.setRecordCount(SPECIAL_BATCH_COUNT);
  recordCount = SPECIAL_BATCH_COUNT;
}
示例5: populatePartitionVectors
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Fills each partition-column vector with its selected partition value,
 * repeated for every record in the batch.
 *
 * @param recordCount number of records the partition vectors must cover
 */
protected void populatePartitionVectors(int recordCount) {
  // pVectors and selectedPartitionValues are parallel lists; walk them in step.
  for (int idx = 0; idx < pVectors.size(); idx++) {
    final ValueVector partitionVector = pVectors.get(idx);
    final Object partitionValue = selectedPartitionValues.get(idx);
    AllocationHelper.allocateNew(partitionVector, recordCount);
    // A null partition value leaves the vector unpopulated (NULL entries).
    if (partitionValue != null) {
      HiveUtilities.populateVector(partitionVector, managedBuffer, partitionValue, 0, recordCount);
    }
    partitionVector.getMutator().setValueCount(recordCount);
  }
}
示例6: constructSpecialBatch
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Builds the single-row output batch for a straight aggregate (no GROUP BY)
 * over empty input. Nullable vectors are left untouched so they read as NULL;
 * required vectors — which only arise for count() — are zeroed explicitly,
 * since buffers are not cleared at allocation time.
 *
 * @throws DrillRuntimeException if a required vector is not fixed-width,
 *         which would mean uninitialized variable-length data is exposed
 */
@SuppressWarnings("resource")
private void constructSpecialBatch() {
  int exprIndex = 0;
  for (final VectorWrapper<?> wrapper : container) {
    final ValueVector vector = wrapper.getValueVector();
    AllocationHelper.allocateNew(vector, SPECIAL_BATCH_COUNT);
    vector.getMutator().setValueCount(SPECIAL_BATCH_COUNT);
    if (vector.getField().getType().getMode() == TypeProtos.DataMode.REQUIRED) {
      // Only count() produces a required vector here, and its output is a
      // fixed-width BigIntVector. Anything else is a bug: the record count is
      // already 1 but the buffer was never cleared.
      if (!(vector instanceof FixedWidthVector)) {
        throw new DrillRuntimeException("FixedWidth vectors is the expected output vector type. " +
            "Corresponding expression: " + popConfig.getExprs().get(exprIndex).toString());
      }
      ((FixedWidthVector) vector).zeroVector();
    }
    exprIndex++;
  }
  container.setRecordCount(SPECIAL_BATCH_COUNT);
  recordCount = SPECIAL_BATCH_COUNT;
}
示例7: next
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Reads the next batch of Hive records into the output vectors, honoring the
 * Hive "skip.header.lines.count" and "skip.footer.lines.count" table
 * properties.
 *
 * To take into account Hive "skip.header.lines.count" property first N values from file are skipped.
 * Since file can be read in batches (depends on TARGET_RECORD_COUNT), additional checks are made
 * to determine if it's new file or continuance.
 *
 * To take into account Hive "skip.footer.lines.count" property values are buffered in queue
 * until queue size exceeds number of footer lines to skip, then first value in queue is retrieved.
 * Buffer of value objects is used to re-use value objects in order to reduce number of created value objects.
 * For each new file queue is cleared to drop footer lines from previous file.
 *
 * @return number of records actually written into the batch; 0 when the input is empty
 */
@Override
public int next() {
// Pre-allocate every output vector for a full batch.
for (ValueVector vv : vectors) {
AllocationHelper.allocateNew(vv, TARGET_RECORD_COUNT);
}
// No data at all: still populate partition vectors with a zero count.
if (empty) {
setValueCountAndPopulatePartitionVectors(0);
return 0;
}
try {
skipRecordsInspector.reset();
int recordCount = 0;
Object value;
// reader.next(...) fills the value object handed out by the inspector;
// recordCount counts raw records read (including skipped header lines).
while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value = skipRecordsInspector.getNextValue())) {
// Header lines are consumed but never emitted.
if (skipRecordsInspector.doSkipHeader(recordCount++)) {
continue;
}
// bufferAdd() returns null while the footer-lookahead queue is still
// filling; once it overflows, the oldest value is safe to emit.
Object bufferedValue = skipRecordsInspector.bufferAdd(value);
if (bufferedValue != null) {
Object deSerializedValue = partitionSerDe.deserialize((Writable) bufferedValue);
// Convert to the table-level object inspector when the partition's
// schema differs from the table's.
if (partTblObjectInspectorConverter != null) {
deSerializedValue = partTblObjectInspectorConverter.convert(deSerializedValue);
}
readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, skipRecordsInspector.getActualCount());
skipRecordsInspector.incrementActualCount();
}
skipRecordsInspector.incrementTempCount();
}
setValueCountAndPopulatePartitionVectors(skipRecordsInspector.getActualCount());
// Remember whether the next call continues the same file (header skipping).
skipRecordsInspector.updateContinuance();
return skipRecordsInspector.getActualCount();
} catch (IOException | SerDeException e) {
throw new DrillRuntimeException(e);
}
}
示例8: next
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Reads the next batch of Hive records into the output vectors, deserializing
 * and converting each value before insertion.
 *
 * @return number of records written into the batch; 0 when the input is empty
 */
@Override
public int next() {
  // Pre-allocate every output vector for a full batch.
  for (final ValueVector vector : vectors) {
    AllocationHelper.allocateNew(vector, TARGET_RECORD_COUNT);
  }
  // No data at all: still populate partition vectors with a zero count.
  if (empty) {
    setValueCountAndPopulatePartitionVectors(0);
    return 0;
  }
  try {
    // starting new batch, reset processed records count
    recordsInspector.reset();
    // process records till batch is full or all records were processed
    while (!recordsInspector.isBatchFull() && hasNextValue(recordsInspector.getValueHolder())) {
      final Object rawValue = recordsInspector.getNextValue();
      if (rawValue == null) {
        continue;
      }
      Object deSerializedValue = partitionSerDe.deserialize((Writable) rawValue);
      // Convert to the table-level object inspector when the partition's
      // schema differs from the table's.
      if (partTblObjectInspectorConverter != null) {
        deSerializedValue = partTblObjectInspectorConverter.convert(deSerializedValue);
      }
      readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordsInspector.getProcessedRecordCount());
      recordsInspector.incrementProcessedRecordCount();
    }
    setValueCountAndPopulatePartitionVectors(recordsInspector.getProcessedRecordCount());
    return recordsInspector.getProcessedRecordCount();
  } catch (SerDeException e) {
    throw new DrillRuntimeException(e);
  }
}
示例9: allocateVectors
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Allocates space for every vector in the container, sized for the maximum
 * batch size.
 */
private void allocateVectors() {
  for (final VectorWrapper<?> wrapper : container) {
    AllocationHelper.allocateNew(wrapper.getValueVector(), MAX_BATCH_SIZE);
  }
}
示例10: allocateVectors
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Allocates space for every vector in the outgoing container, sized to hold
 * the requested number of records.
 *
 * @param targetRecordCount number of records each vector must accommodate
 */
private void allocateVectors(int targetRecordCount) {
  // Use the wildcard-bounded wrapper type instead of the raw VectorWrapper,
  // which suppressed generic type checking.
  for (final VectorWrapper<?> wrapper : outgoing) {
    AllocationHelper.allocateNew(wrapper.getValueVector(), targetRecordCount);
  }
}
示例11: allocateVectors
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Allocates space for every vector reachable through the given accessible,
 * sized to hold the requested number of records.
 *
 * @param va vector accessible whose wrapped vectors are allocated
 * @param targetRecordCount number of records each vector must accommodate
 */
public static void allocateVectors(VectorAccessible va, int targetRecordCount) {
  for (final VectorWrapper<?> wrapper : va) {
    AllocationHelper.allocateNew(wrapper.getValueVector(), targetRecordCount);
  }
}
示例12: allocateVectors
import org.apache.drill.exec.vector.AllocationHelper; //导入方法依赖的package包/类
/**
 * Allocates space in each of the given vectors for the requested number of
 * values.
 *
 * @param valueVectors vectors to allocate
 * @param count number of values each vector must accommodate
 */
public static void allocateVectors(Iterable<ValueVector> valueVectors, int count) {
  for (final ValueVector vector : valueVectors) {
    AllocationHelper.allocateNew(vector, count);
  }
}