本文整理汇总了Java中org.apache.drill.exec.record.VectorWrapper类的典型用法代码示例。如果您正苦于以下问题:Java VectorWrapper类的具体用法?Java VectorWrapper怎么用?Java VectorWrapper使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
VectorWrapper类属于org.apache.drill.exec.record包,在下文中一共展示了VectorWrapper类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: showSingleBatch
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Prints one deserialized batch to stdout: optionally its meta info and a
 * per-column schema listing, then the full vector contents.
 *
 * @param vcSerializable deserialized batch wrapper to display
 * @param showHeader     when true, print batch meta info and schema lines first
 */
private void showSingleBatch (VectorAccessibleSerializable vcSerializable, boolean showHeader) {
  final VectorContainer vectorContainer = (VectorContainer) vcSerializable.get();
  /* show the header of the batch */
  if (showHeader) {
    System.out.println(getBatchMetaInfo(vcSerializable).toString());
    System.out.println("Schema Information");
    // Use the wildcard-parameterized type rather than the raw VectorWrapper.
    for (final VectorWrapper<?> w : vectorContainer) {
      final MaterializedField field = w.getValueVector().getField();
      System.out.println (String.format("name : %s, minor_type : %s, data_mode : %s",
          field.toExpr(),
          field.getType().getMinorType().toString(),
          field.isNullable() ? "nullable" : "non-nullable"
      ));
    }
  }
  /* show the contents in the batch */
  VectorUtil.showVectorAccessibleContent(vectorContainer);
}
示例2: getBatchMetaInfo
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Computes summary statistics for one batch: total rows, selected rows
 * (honoring a two-byte selection vector when present) and the summed buffer
 * size of all value vectors.
 *
 * @param vcSerializable deserialized batch wrapper to inspect
 * @return meta info holding row count, selected-row count and data size
 */
private BatchMetaInfo getBatchMetaInfo(VectorAccessibleSerializable vcSerializable) {
  final VectorAccessible vectorContainer = vcSerializable.get();
  final int rows = vectorContainer.getRecordCount();
  int selectedRows = rows;
  if (vectorContainer.getSchema().getSelectionVectorMode() == SelectionVectorMode.TWO_BYTE) {
    // An SV2 filters the batch; only its entries count as selected.
    selectedRows = vcSerializable.getSv2().getCount();
  }
  // NOTE(review): int accumulation can overflow for batches over 2 GB —
  // presumably batches are far smaller; confirm before relying on this.
  int totalDataSize = 0;
  // Wildcard-parameterized wrapper instead of the raw VectorWrapper type.
  for (final VectorWrapper<?> w : vectorContainer) {
    totalDataSize += w.getValueVector().getBufferSize();
  }
  return new BatchMetaInfo(rows, selectedRows, totalDataSize);
}
示例3: handleRemainder
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Projects the incoming records left over from a previous partial projection
 * pass (rows from remainderIndex onward). If the output again fills before all
 * remaining rows are projected, remainderIndex advances and hasRemainder stays
 * set; otherwise the incoming vectors are released and the remainder state is
 * reset. Sets the outOfMemory flag and aborts if output allocation fails.
 */
private void handleRemainder() {
// Rows still waiting to be projected from the incoming batch.
final int remainingRecordCount = incoming.getRecordCount() - remainderIndex;
if (!doAlloc()) {
// Could not allocate output vectors; signal the condition to the caller.
outOfMemory = true;
return;
}
final int projRecords = projector.projectRecords(remainderIndex, remainingRecordCount, 0);
if (projRecords < remainingRecordCount) {
// Output filled up again: record progress and keep the remainder pending.
setValueCount(projRecords);
this.recordCount = projRecords;
remainderIndex += projRecords;
} else {
// All remaining rows projected: clear remainder state and free the input.
setValueCount(remainingRecordCount);
hasRemainder = false;
remainderIndex = 0;
for (final VectorWrapper<?> v : incoming) {
v.clear();
}
this.recordCount = remainingRecordCount;
}
// In case of complex writer expression, vectors would be added to batch run-time.
// We have to re-build the schema.
if (complexWriters != null) {
container.buildSchema(SelectionVectorMode.NONE);
}
}
示例4: innerNext
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Returns the next batch. Once the configured limit has been exhausted (and
 * this is not the first call), upstream is cancelled and every remaining
 * incoming batch is drained and its memory released before reporting NONE;
 * otherwise delegates to the superclass.
 */
@Override
public IterOutcome innerNext() {
  if (!first && !noEndLimit && recordsLeft <= 0) {
    // Limit reached: tell upstream to stop, then drain what is in flight.
    incoming.kill(true);
    IterOutcome outcome = next(incoming);
    while (outcome == IterOutcome.OK || outcome == IterOutcome.OK_NEW_SCHEMA) {
      // Release the memory held by the discarded batch.
      for (final VectorWrapper<?> wrapper : incoming) {
        wrapper.getValueVector().clear();
      }
      outcome = next(incoming);
    }
    // Propagate memory pressure; anything else means the stream is done.
    if (outcome == IterOutcome.OUT_OF_MEMORY) {
      return outcome;
    }
    return IterOutcome.NONE;
  }
  return super.innerNext();
}
示例5: buildSchema
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Pulls the first incoming batch to establish the output schema. Terminal
 * outcomes (NONE / OUT_OF_MEMORY / STOP) set the batch state and return early;
 * any other outcome falls through to create the aggregator and pre-allocate
 * the output vectors with zero-sized buffers.
 *
 * @throws SchemaChangeException if aggregator creation rejects the schema
 */
@Override
public void buildSchema() throws SchemaChangeException {
  IterOutcome outcome = next(incoming);
  switch (outcome) {
  case NONE:
    // Empty input: expose an empty schema and finish.
    state = BatchState.DONE;
    container.buildSchema(SelectionVectorMode.NONE);
    return;
  case OUT_OF_MEMORY:
    state = BatchState.OUT_OF_MEMORY;
    return;
  case STOP:
    state = BatchState.STOP;
    return;
  default:
    break; // OK / OK_NEW_SCHEMA: proceed with setup below.
  }
  if (!createAggregator()) {
    state = BatchState.DONE;
  }
  // Wildcard-parameterized wrapper instead of the raw VectorWrapper type.
  for (final VectorWrapper<?> w : container) {
    AllocationHelper.allocatePrecomputedChildCount(w.getValueVector(), 0, 0, 0);
  }
}
示例6: allocateOutgoing
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Allocates memory for the non-key (workspace value) vectors of the outgoing
 * container. The first numGroupByOutFields wrappers are the group-by keys and
 * are skipped because they are populated later via splitAndTransfer.
 *
 * @param records number of records to allocate space for
 */
private void allocateOutgoing(int records) {
  // Skip the keys and only allocate for outputting the workspace values
  // (keys will be output through splitAndTransfer)
  Iterator<VectorWrapper<?>> outgoingIter = outContainer.iterator();
  for (int i = 0; i < numGroupByOutFields; i++) {
    outgoingIter.next();
  }
  while (outgoingIter.hasNext()) {
    ValueVector vv = outgoingIter.next().getValueVector();
    // (Removed an unused local that fetched the field's MajorType.)
    /*
     * In build schema we use the allocation model that specifies exact record count
     * so we need to stick with that allocation model until DRILL-2211 is resolved. Using
     * 50 as the average bytes per value as is used in HashTable.
     */
    AllocationHelper.allocatePrecomputedChildCount(vv, records, VARIABLE_WIDTH_VALUE_SIZE, 0);
  }
}
示例7: InternalBatch
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Snapshots the incoming batch: clones whichever selection vector is active
 * (SV4 or SV2), records the schema, and takes ownership of the data via a
 * transfer clone, excluding the given wrappers.
 *
 * @param incoming       batch whose data and selection state are captured
 * @param ignoreWrappers vectors to leave out of the transfer clone
 */
public InternalBatch(RecordBatch incoming, VectorWrapper[] ignoreWrappers){
switch(incoming.getSchema().getSelectionVectorMode()){
case FOUR_BYTE:
// Hyper-batch indirection: wrap the current SV4 state.
this.sv4 = incoming.getSelectionVector4().createNewWrapperCurrent();
this.sv2 = null;
break;
case TWO_BYTE:
// Single-batch indirection: clone the SV2 so this copy owns it.
this.sv4 = null;
this.sv2 = incoming.getSelectionVector2().clone();
break;
default:
// No selection vector in play.
this.sv4 = null;
this.sv2 = null;
}
this.schema = incoming.getSchema();
this.container = VectorContainer.getTransferClone(incoming, ignoreWrappers);
}
示例8: buildSchema
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Pulls the first incoming batch to establish the output schema. Terminal
 * outcomes (NONE / OUT_OF_MEMORY / STOP) set the batch state and return early;
 * otherwise the aggregator is created and every output vector is allocated.
 *
 * @throws SchemaChangeException if aggregator creation rejects the schema
 */
@Override
public void buildSchema() throws SchemaChangeException {
  final IterOutcome firstOutcome = next(incoming);
  switch (firstOutcome) {
  case NONE:
    // Empty input: expose an empty schema and finish.
    state = BatchState.DONE;
    container.buildSchema(SelectionVectorMode.NONE);
    return;
  case OUT_OF_MEMORY:
    state = BatchState.OUT_OF_MEMORY;
    return;
  case STOP:
    state = BatchState.STOP;
    return;
  default:
    break; // OK / OK_NEW_SCHEMA: continue with setup below.
  }
  if (!createAggregator()) {
    state = BatchState.DONE;
  }
  for (final VectorWrapper<?> wrapper : container) {
    wrapper.getValueVector().allocateNew();
  }
}
示例9: getGenerated2Copier
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Builds a generated Copier for an SV2 (two-byte selection vector) batch:
 * mirrors each incoming vector into the output container, then compiles and
 * wires up the copy implementation.
 *
 * @return a copier set up to remove records from {@code incoming} into this batch
 * @throws SchemaChangeException if the generated class cannot be loaded
 */
private Copier getGenerated2Copier() throws SchemaChangeException{
  Preconditions.checkArgument(incoming.getSchema().getSelectionVectorMode() == SelectionVectorMode.TWO_BYTE);
  // makeTransferPair is called for its side effect of pairing the incoming
  // vector with its output twin; the returned TransferPair itself is not
  // needed (previously it was stored in an unused local).
  for(VectorWrapper<?> vv : incoming){
    vv.getValueVector().makeTransferPair(container.addOrGet(vv.getField(), callBack));
  }
  try {
    final CodeGenerator<Copier> cg = CodeGenerator.get(Copier.TEMPLATE_DEFINITION2, context.getFunctionRegistry());
    CopyUtil.generateCopies(cg.getRoot(), incoming, false);
    Copier copier = context.getImplementationClass(cg);
    copier.setupRemover(context, incoming, this);
    return copier;
  } catch (ClassTransformationException | IOException e) {
    throw new SchemaChangeException("Failure while attempting to load generated class", e);
  }
}
示例10: getGenerated4Copier
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Builds a generated Copier for an SV4 (hyper-batch) source: mirrors the first
 * vector of each hyper wrapper into the output container, then compiles and
 * wires up the copy implementation.
 *
 * @return a copier set up to remove records from {@code batch} into {@code outgoing}
 * @throws SchemaChangeException if the generated class cannot be loaded
 */
public static Copier getGenerated4Copier(RecordBatch batch, FragmentContext context, BufferAllocator allocator, VectorContainer container, RecordBatch outgoing, SchemaChangeCallBack callBack) throws SchemaChangeException{
  for (final VectorWrapper<?> wrapper : batch) {
    // Hyper wrappers hold several vectors per column; the first one carries
    // the field definition used to create the output twin.
    final ValueVector source = wrapper.getValueVectors()[0];
    source.makeTransferPair(container.addOrGet(source.getField(), callBack));
  }
  try {
    final CodeGenerator<Copier> generator =
        CodeGenerator.get(Copier.TEMPLATE_DEFINITION4, context.getFunctionRegistry());
    CopyUtil.generateCopies(generator.getRoot(), batch, true);
    final Copier copier = context.getImplementationClass(generator);
    copier.setupRemover(context, batch, outgoing);
    return copier;
  } catch (ClassTransformationException | IOException e) {
    throw new SchemaChangeException("Failure while attempting to load generated class", e);
  }
}
示例11: copyRecords
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Copies recordCount records, starting at SV4 position index, from the source
 * into the freshly allocated outgoing vectors.
 *
 * @return the number of records actually copied
 */
@Override
public int copyRecords(int index, int recordCount){
  // Pre-allocate every outgoing vector: fixed-width, non-repeated types can be
  // sized exactly; everything else falls back to the default allocation.
  for (final VectorWrapper<?> wrapper : outgoing) {
    final MajorType type = wrapper.getField().getType();
    if (Types.isFixedWidthType(type) && !Types.isRepeated(type)) {
      AllocationHelper.allocate(wrapper.getValueVector(), recordCount, 1);
    } else {
      wrapper.getValueVector().allocateNew();
    }
  }
  // Resolve each SV4 entry to its underlying record and copy it out in order.
  int copied = 0;
  for (int svIndex = index; svIndex < index + recordCount; svIndex++) {
    final int deRefIndex = sv4.get(svIndex);
    doEval(deRefIndex, copied);
    copied++;
  }
  return copied;
}
示例12: copyRecords
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Copies recordCount records, starting at SV2 position index, from the source
 * into the freshly allocated outgoing vectors.
 *
 * @return the number of records actually copied
 */
@Override
public int copyRecords(int index, int recordCount){
  // Pre-allocate every outgoing vector: fixed-width, non-repeated types can be
  // sized exactly; everything else falls back to the default allocation.
  for (final VectorWrapper<?> wrapper : outgoing) {
    final MajorType type = wrapper.getField().getType();
    if (Types.isFixedWidthType(type) && !Types.isRepeated(type)) {
      AllocationHelper.allocate(wrapper.getValueVector(), recordCount, 1);
    } else {
      wrapper.getValueVector().allocateNew();
    }
  }
  // Resolve each SV2 entry to its underlying record and copy it out in order.
  int copied = 0;
  for (int svIndex = index; svIndex < index + recordCount; svIndex++) {
    doEval(sv2.getIndex(svIndex), copied);
    copied++;
  }
  return copied;
}
示例13: WindowDataBatch
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Takes ownership of the given batch's data by transferring every value
 * vector into a new container tied to the supplied operator context.
 *
 * @param batch    source batch; must not be a hyper batch
 * @param oContext operator context that owns the new container
 * @throws UnsupportedOperationException if {@code batch} is a hyper batch
 */
public WindowDataBatch(final VectorAccessible batch, final OperatorContext oContext) {
  this.oContext = oContext;
  recordCount = batch.getRecordCount();
  final List<ValueVector> transferred = Lists.newArrayList();
  for (final VectorWrapper<?> wrapper : batch) {
    if (wrapper.isHyper()) {
      throw new UnsupportedOperationException("Record batch data can't be created based on a hyper batch.");
    }
    // Transfer moves the buffers, leaving the source vector empty.
    final TransferPair pair = wrapper.getValueVector().getTransferPair();
    pair.transfer();
    transferred.add(pair.getTo());
  }
  container = new VectorContainer(oContext);
  container.addCollection(transferred);
  container.setRecordCount(recordCount);
  container.buildSchema(batch.getSchema().getSelectionVectorMode());
}
示例14: RecordBatchData
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
public RecordBatchData(VectorAccessible batch) {
List<ValueVector> vectors = Lists.newArrayList();
recordCount = batch.getRecordCount();
if (batch instanceof RecordBatch && batch.getSchema().getSelectionVectorMode() == SelectionVectorMode.TWO_BYTE) {
this.sv2 = ((RecordBatch)batch).getSelectionVector2().clone();
} else {
this.sv2 = null;
}
for (VectorWrapper<?> v : batch) {
if (v.isHyper()) {
throw new UnsupportedOperationException("Record batch data can't be created based on a hyper batch.");
}
TransferPair tp = v.getValueVector().getTransferPair();
tp.transfer();
vectors.add(tp.getTo());
}
container.addCollection(vectors);
container.setRecordCount(recordCount);
container.buildSchema(batch.getSchema().getSelectionVectorMode());
}
示例15: getOutput
import org.apache.drill.exec.record.VectorWrapper; //导入依赖的package包/类
/**
 * Materializes query result batches into string cell values, one inner list
 * per row; null cells remain null.
 *
 * @param batches raw result batches to decode and release
 * @return one list of column strings per result row, in batch order
 * @throws SchemaChangeException declared for historical reasons (see TODO below)
 */
private List<List<String>> getOutput(List<QueryDataBatch> batches) throws SchemaChangeException {
  final List<List<String>> rows = new ArrayList<>();
  final RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
  for (final QueryDataBatch batch : batches) {
    final int rowCount = batch.getHeader().getRowCount();
    if (batch.getData() != null) {
      loader.load(batch.getHeader().getDef(), batch.getData());
      // TODO: Clean: DRILL-2933: That load(...) no longer throws
      // SchemaChangeException, so check/clean throws clause above.
      for (int i = 0; i < rowCount; ++i) {
        final List<String> row = new ArrayList<>();
        for (final VectorWrapper<?> vw : loader) {
          final Object cell = vw.getValueVector().getAccessor().getObject(i);
          row.add(cell == null ? null : cell.toString());
        }
        rows.add(row);
      }
    }
    loader.clear();
    batch.release();
  }
  return rows;
}