This article collects typical usage examples of the Java method org.apache.drill.exec.ops.FragmentContext.fail. If you are wondering what FragmentContext.fail does, how it is called, or where to find examples of its use, the curated code samples below should help. You can also explore the enclosing class org.apache.drill.exec.ops.FragmentContext for further usage examples.
A total of 2 code examples of FragmentContext.fail are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
Example 1: AbstractDataCollector
import org.apache.drill.exec.ops.FragmentContext; // import the package/class the method depends on
/**
 * @param parentAccounter Shared counter used to signal the parent fragment once this collector's buffers are ready
 * @param numBuffers Number of RawBatchBuffer inputs required to store the incoming data
 * @param collector Collector definition describing the incoming minor fragments
 * @param bufferCapacity Capacity of each RawBatchBuffer
 * @param context Fragment context, used here for buffer allocation and failure reporting
 */
public AbstractDataCollector(AtomicInteger parentAccounter,
    final int numBuffers, Collector collector, final int bufferCapacity, FragmentContext context) {
  Preconditions.checkNotNull(collector);
  Preconditions.checkNotNull(parentAccounter);
  this.incomingStreams = collector.getIncomingMinorFragmentCount();
  this.parentAccounter = parentAccounter;
  this.remainders = new AtomicIntegerArray(incomingStreams);
  this.oppositeMajorFragmentId = collector.getOppositeMajorFragmentId();
  // Create fragmentId to index that is within the range [0, incoming.size()-1]
  // We use this mapping to find objects belonging to the fragment in buffers and remainders arrays.
  fragmentMap = new ArrayWrappedIntIntMap();
  int index = 0;
  for (Integer endpoint : collector.getIncomingMinorFragmentList()) {
    fragmentMap.put(endpoint, index);
    index++;
  }
  buffers = new RawBatchBuffer[numBuffers];
  remainingRequired = new AtomicInteger(numBuffers);
  final boolean spooling = collector.getIsSpooling();
  try {
    for (int i = 0; i < numBuffers; i++) {
      if (spooling) {
        buffers[i] = new SpoolingRawBatchBuffer(context, bufferCapacity, collector.getOppositeMajorFragmentId(), i);
      } else {
        buffers[i] = new UnlimitedRawBatchBuffer(context, bufferCapacity, collector.getOppositeMajorFragmentId());
      }
    }
  } catch (IOException | OutOfMemoryException e) {
    logger.error("Exception", e);
    context.fail(e);
  }
}
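The pattern in Example 1 is the canonical shape of setup-failure handling: catch the exception during resource allocation, log it, and hand it to the fragment via context.fail(e) so the Foreman can fail the whole query rather than letting the error die silently on a worker thread. Below is a minimal, hypothetical sketch of that pattern; FailureReportingSetup, setUpResources, and the logger are illustrative placeholders, not part of the Drill API.

import org.apache.drill.exec.ops.FragmentContext;

public class FailureReportingSetup {
  private static final org.slf4j.Logger logger =
      org.slf4j.LoggerFactory.getLogger(FailureReportingSetup.class);

  // Hypothetical helper doing fallible setup work (allocating buffers, opening files, ...).
  private void setUpResources() throws Exception {
    // ...
  }

  public void setup(FragmentContext context) {
    try {
      setUpResources();
    } catch (Exception e) {
      // Log locally, then report to the fragment so the failure propagates to the Foreman.
      logger.error("Exception", e);
      context.fail(e);
    }
  }
}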
Example 2: getExec
import org.apache.drill.exec.ops.FragmentContext; // import the package/class the method depends on
/**
* Create and return fragment RootExec for given FragmentRoot. RootExec has one or more RecordBatches as children
* (which may contain child RecordBatches and so on).
*
* @param context
* FragmentContext.
* @param root
* FragmentRoot.
* @return RootExec of fragment.
* @throws ExecutionSetupException
*/
public static RootExec getExec(FragmentContext context, FragmentRoot root) throws ExecutionSetupException {
  Preconditions.checkNotNull(root);
  Preconditions.checkNotNull(context);
  if (AssertionUtil.isAssertionsEnabled()) {
    root = IteratorValidatorInjector.rewritePlanWithIteratorValidator(context, root);
  }
  final ImplCreator creator = new ImplCreator();
  Stopwatch watch = new Stopwatch();
  watch.start();
  try {
    final RootExec rootExec = creator.getRootExec(root, context);
    // skip over this for SimpleRootExec (testing)
    if (rootExec instanceof BaseRootExec) {
      ((BaseRootExec) rootExec).setOperators(creator.getOperators());
    }
    logger.debug("Took {} ms to create RecordBatch tree", watch.elapsed(TimeUnit.MILLISECONDS));
    if (rootExec == null) {
      throw new ExecutionSetupException(
          "The provided fragment did not have a root node that correctly created a RootExec value.");
    }
    return rootExec;
  } catch (Exception e) {
    context.fail(e);
    for (final CloseableRecordBatch crb : creator.getOperators()) {
      AutoCloseables.close(crb, logger);
    }
  }
  return null;
}