This page collects typical usage examples of the Java method org.apache.drill.exec.ops.FragmentContext.newOperatorContext, drawn from open-source projects. If you are unsure what FragmentContext.newOperatorContext does or how to call it, the curated examples below should help; you can also explore the enclosing class, org.apache.drill.exec.ops.FragmentContext.
Nine code examples of FragmentContext.newOperatorContext are shown below, sorted by popularity by default.
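For orientation, newOperatorContext creates a per-operator OperatorContext, carved out of the fragment-level FragmentContext, that owns the operator's memory allocator and its stats. A minimal sketch, assuming a Drill version exposing the overloads used in the examples below ('context' and 'config' here are hypothetical local variables, not taken from any example):
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.ops.OperatorContext;
import org.apache.drill.exec.ops.OperatorStats;

// The boolean flag controls whether the operator's allocations are counted
// against the fragment's memory limit (see Example 8 below).
OperatorContext oContext = context.newOperatorContext(config, false);
BufferAllocator allocator = oContext.getAllocator(); // per-operator allocator
OperatorStats stats = oContext.getStats();           // per-operator metrics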
Example 1: UnorderedReceiverBatch
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
public UnorderedReceiverBatch(final FragmentContext context, final RawFragmentBatchProvider fragProvider, final UnorderedReceiver config) throws OutOfMemoryException {
  this.fragProvider = fragProvider;
  this.context = context;
  // In the normal case, batchLoader does not require an allocator. However, in the case of splitAndTransfer
  // of a value vector, we may need an allocator for the new offset vector. Therefore, we pass the context's
  // allocator to batchLoader here.
  oContext = context.newOperatorContext(config, false);
  this.batchLoader = new RecordBatchLoader(oContext.getAllocator());
  this.stats = oContext.getStats();
  this.stats.setLongStat(Metric.NUM_SENDERS, config.getNumSenders());
  this.config = config;
}
Example 2: PartitionSenderRootExec
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
public PartitionSenderRootExec(FragmentContext context,
                               RecordBatch incoming,
                               HashPartitionSender operator) throws OutOfMemoryException {
  super(context, context.newOperatorContext(operator, null, false), operator);
  this.incoming = incoming;
  this.operator = operator;
  this.context = context;
  outGoingBatchCount = operator.getDestinations().size();
  popConfig = operator;
  remainingReceivers = new AtomicIntegerArray(outGoingBatchCount);
  remaingReceiverCount = new AtomicInteger(outGoingBatchCount);
  stats.setLongStat(Metric.N_RECEIVERS, outGoingBatchCount);
  // Algorithm to figure out the number of threads to parallelize output:
  // numberOfRows / sliceTarget / numReceivers / threadFactor
  this.cost = operator.getChild().getCost();
  final OptionManager optMgr = context.getOptions();
  long sliceTarget = optMgr.getOption(ExecConstants.SLICE_TARGET).num_val;
  int threadFactor = optMgr.getOption(PlannerSettings.PARTITION_SENDER_THREADS_FACTOR.getOptionName()).num_val.intValue();
  int tmpParts = 1;
  if (sliceTarget != 0 && outGoingBatchCount != 0) {
    tmpParts = (int) Math.round((((cost / (sliceTarget * 1.0)) / (outGoingBatchCount * 1.0)) / (threadFactor * 1.0)));
    if (tmpParts < 1) {
      tmpParts = 1;
    }
  }
  final int imposedThreads = optMgr.getOption(PlannerSettings.PARTITION_SENDER_SET_THREADS.getOptionName()).num_val.intValue();
  if (imposedThreads > 0) {
    this.numberPartitions = imposedThreads;
  } else {
    this.numberPartitions = Math.min(tmpParts, optMgr.getOption(PlannerSettings.PARTITION_SENDER_MAX_THREADS.getOptionName()).num_val.intValue());
  }
  logger.info("Preliminary number of sending threads is: " + numberPartitions);
  this.actualPartitions = outGoingBatchCount > numberPartitions ? numberPartitions : outGoingBatchCount;
  this.stats.setLongStat(Metric.SENDING_THREADS_COUNT, actualPartitions);
  this.stats.setDoubleStat(Metric.COST, this.cost);
}
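To make the thread-count heuristic concrete, here is a hedged worked example with hypothetical numbers (a cost of 4,000,000 rows, a slice target of 100,000, 8 receivers, thread factor 2):
double cost = 4_000_000;     // estimated rows from the child operator
long sliceTarget = 100_000;  // ExecConstants.SLICE_TARGET
int receivers = 8;           // outGoingBatchCount
int threadFactor = 2;        // PARTITION_SENDER_THREADS_FACTOR
int tmpParts = (int) Math.round(cost / sliceTarget / receivers / threadFactor);
// round(4,000,000 / 100,000 / 8 / 2) = round(2.5) = 3 sending threads,
// still capped by PARTITION_SENDER_MAX_THREADS unless SET_THREADS overrides it.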
Example 3: MergingRecordBatch
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
public MergingRecordBatch(final FragmentContext context,
                          final MergingReceiverPOP config,
                          final RawFragmentBatchProvider[] fragProviders) throws OutOfMemoryException {
  super(config, context, true, context.newOperatorContext(config, false));
  this.fragProviders = fragProviders;
  this.context = context;
  this.outgoingContainer = new VectorContainer(oContext);
  this.stats.setLongStat(Metric.NUM_SENDERS, config.getNumSenders());
  this.config = config;
  this.inputCounts = new long[config.getNumSenders()];
  this.outputCounts = new long[config.getNumSenders()];
}
Example 4: BroadcastSenderRootExec
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
public BroadcastSenderRootExec(FragmentContext context,
                               RecordBatch incoming,
                               BroadcastSender config) throws OutOfMemoryException {
  super(context, context.newOperatorContext(config, null, false), config);
  this.ok = true;
  this.incoming = incoming;
  this.config = config;
  this.handle = context.getHandle();
  List<MinorFragmentEndpoint> destinations = config.getDestinations();
  ArrayListMultimap<DrillbitEndpoint, Integer> dests = ArrayListMultimap.create();
  for (MinorFragmentEndpoint destination : destinations) {
    dests.put(destination.getEndpoint(), destination.getId());
  }
  int destCount = dests.keySet().size();
  int i = 0;
  this.tunnels = new AccountingDataTunnel[destCount];
  this.receivingMinorFragments = new int[destCount][];
  for (final DrillbitEndpoint ep : dests.keySet()) {
    List<Integer> minorsList = dests.get(ep);
    int[] minorsArray = new int[minorsList.size()];
    int x = 0;
    for (Integer m : minorsList) {
      minorsArray[x++] = m;
    }
    receivingMinorFragments[i] = minorsArray;
    tunnels[i] = context.getDataTunnel(ep);
    i++;
  }
}
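The ArrayListMultimap above exists so that minor fragments landing on the same Drillbit share one data tunnel. A small illustration of the grouping, with made-up endpoint names standing in for DrillbitEndpoint values:
import com.google.common.collect.ArrayListMultimap;

ArrayListMultimap<String, Integer> dests = ArrayListMultimap.create();
dests.put("drillbit-a", 0);  // minor fragment 0 runs on drillbit-a
dests.put("drillbit-b", 1);  // minor fragment 1 runs on drillbit-b
dests.put("drillbit-a", 2);  // minor fragment 2 also runs on drillbit-a
// dests.keySet().size() == 2, so only two tunnels are opened;
// dests.get("drillbit-a") == [0, 2] becomes one receivingMinorFragments entry.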
Example 5: BaseRootExec
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
public BaseRootExec(final FragmentContext fragmentContext, final PhysicalOperator config) throws OutOfMemoryException {
  this.oContext = fragmentContext.newOperatorContext(config, stats, true);
  stats = new OperatorStats(new OpProfileDef(config.getOperatorId(),
      config.getOperatorType(), OperatorContext.getChildCount(config)),
      oContext.getAllocator());
  fragmentContext.getStats().addOperatorStats(this.stats);
  this.fragmentContext = fragmentContext;
}
Example 6: ParquetRecordWriter
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
public ParquetRecordWriter(FragmentContext context, ParquetWriter writer) throws OutOfMemoryException {
  super();
  this.oContext = context.newOperatorContext(writer, true);
  this.codecFactory = new DirectCodecFactory(writer.getFormatPlugin().getFsConf(), oContext.getAllocator());
  this.partitionColumns = writer.getPartitionColumns();
  this.hasPartitions = partitionColumns != null && partitionColumns.size() > 0;
}
Example 7: AbstractRecordBatch
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
protected AbstractRecordBatch(final T popConfig, final FragmentContext context) throws OutOfMemoryException {
  this(popConfig, context, true, context.newOperatorContext(popConfig, true));
}
Example 8: ScanBatch
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
public ScanBatch(PhysicalOperator subScanConfig, FragmentContext context, Iterator<RecordReader> readers)
    throws ExecutionSetupException {
  this(subScanConfig, context,
      context.newOperatorContext(subScanConfig, false /* ScanBatch is not subject to fragment memory limit */),
      readers, Collections.<String[]> emptyList(), Collections.<Integer> emptyList());
}
Example 9: getReaderBatch
import org.apache.drill.exec.ops.FragmentContext; // import the package/class this method depends on
CloseableRecordBatch getReaderBatch(FragmentContext context, EasySubScan scan) throws ExecutionSetupException {
  String partitionDesignator = context.getOptions()
      .getOption(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL).string_val;
  List<SchemaPath> columns = scan.getColumns();
  List<RecordReader> readers = Lists.newArrayList();
  List<String[]> partitionColumns = Lists.newArrayList();
  List<Integer> selectedPartitionColumns = Lists.newArrayList();
  boolean selectAllColumns = false;

  if (columns == null || columns.size() == 0 || AbstractRecordReader.isStarQuery(columns)) {
    selectAllColumns = true;
  } else {
    List<SchemaPath> newColumns = Lists.newArrayList();
    Pattern pattern = Pattern.compile(String.format("%s[0-9]+", partitionDesignator));
    for (SchemaPath column : columns) {
      Matcher m = pattern.matcher(column.getAsUnescapedPath());
      if (m.matches()) {
        selectedPartitionColumns.add(Integer.parseInt(column.getAsUnescapedPath().substring(partitionDesignator.length())));
      } else {
        newColumns.add(column);
      }
    }
    // We must make sure to pass a table column (not to be confused with a partition column) to the
    // underlying record reader.
    if (newColumns.size() == 0) {
      newColumns.add(AbstractRecordReader.STAR_COLUMN);
    }
    // Create a new sub-scan object with the new set of columns.
    EasySubScan newScan = new EasySubScan(scan.getUserName(), scan.getWorkUnits(), scan.getFormatPlugin(),
        newColumns, scan.getSelectionRoot());
    newScan.setOperatorId(scan.getOperatorId());
    scan = newScan;
  }

  int numParts = 0;
  OperatorContext oContext = context.newOperatorContext(scan,
      false /* ScanBatch is not subject to fragment memory limit */);
  final DrillFileSystem dfs;
  try {
    dfs = oContext.newFileSystem(fsConf);
  } catch (IOException e) {
    throw new ExecutionSetupException(String.format("Failed to create FileSystem: %s", e.getMessage()), e);
  }

  for (FileWork work : scan.getWorkUnits()) {
    readers.add(getRecordReader(context, dfs, work, scan.getColumns()));
    if (scan.getSelectionRoot() != null) {
      String[] r = Path.getPathWithoutSchemeAndAuthority(new Path(scan.getSelectionRoot())).toString().split("/");
      String[] p = Path.getPathWithoutSchemeAndAuthority(new Path(work.getPath())).toString().split("/");
      if (p.length > r.length) {
        String[] q = ArrayUtils.subarray(p, r.length, p.length - 1);
        partitionColumns.add(q);
        numParts = Math.max(numParts, q.length);
      } else {
        partitionColumns.add(new String[] {});
      }
    } else {
      partitionColumns.add(new String[] {});
    }
  }

  if (selectAllColumns) {
    for (int i = 0; i < numParts; i++) {
      selectedPartitionColumns.add(i);
    }
  }
  return new ScanBatch(scan, context, oContext, readers.iterator(), partitionColumns, selectedPartitionColumns);
}
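To illustrate the partition-column arithmetic in the loop above, take a hypothetical selection root of /data/table and a work unit at /data/table/2015/01/file.parquet. Splitting both paths on '/' and keeping the directory components between the root and the file name yields the partition values:
String[] r = "/data/table".split("/");                       // ["", "data", "table"]
String[] p = "/data/table/2015/01/file.parquet".split("/");  // length 6, ends with the file name
String[] q = java.util.Arrays.copyOfRange(p, r.length, p.length - 1);  // same as ArrayUtils.subarray
// q == ["2015", "01"]: with the default partition label this reader exposes
// dir0 = "2015" and dir1 = "01", and numParts becomes 2.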