

Java FragmentContext Class Code Examples

This article collects typical usage examples of the Java class org.apache.drill.exec.ops.FragmentContext. If you are wondering what FragmentContext is for, how to use it, or where to find working examples of it, the curated class examples below should help.


FragmentContext belongs to the org.apache.drill.exec.ops package. Fifteen code examples of the class are presented below, sorted by popularity.
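
Before the examples, here is a minimal sketch of the construction pattern most of them share: build a FunctionImplementationRegistry from the Drill config, then create a FragmentContext from a DrillbitContext, a PlanFragment, and a client connection. The names bitContext, connection, and config stand in for fixtures supplied by a surrounding test harness (see Example 1); this is an illustrative distillation of the examples below, not a standalone program.

import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
import org.apache.drill.exec.proto.BitControl.PlanFragment;

// bitContext (DrillbitContext), connection (UserClientConnection) and config
// (DrillConfig) are assumed to be provided by the enclosing test fixture.
final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(config);
final FragmentContext context = new FragmentContext(
    bitContext, PlanFragment.getDefaultInstance(), connection, registry);
// The context is then handed to operator setup code, e.g. ImplCreator.getExec(...).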

Example 1: doPhysicalTest

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
private SimpleRootExec doPhysicalTest(final DrillbitContext bitContext, UserClientConnection connection, String file)
    throws Exception {
  new NonStrictExpectations() {
    {
      bitContext.getMetrics();
      result = new MetricRegistry();
      bitContext.getAllocator();
      result = RootAllocatorFactory.newRoot(config);
      bitContext.getConfig();
      result = config;
    }
  };

  final StoragePluginRegistry reg = new StoragePluginRegistry(bitContext);

  final PhysicalPlanReader reader = new PhysicalPlanReader(config, config.getMapper(),
      CoordinationProtos.DrillbitEndpoint.getDefaultInstance(), reg);
  final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile(file), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(config);
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false)
      .iterator().next()));
  return exec;
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 25, Source: TestOptiqPlans.java

Example 2: ParquetRecordReader

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public ParquetRecordReader(
    FragmentContext fragmentContext,
    long batchSize,
    String path,
    int rowGroupIndex,
    FileSystem fs,
    DirectCodecFactory codecFactory,
    ParquetMetadata footer,
    List<SchemaPath> columns) throws ExecutionSetupException {
  this.hadoopPath = new Path(path);
  this.fileSystem = fs;
  this.codecFactory = codecFactory;
  this.rowGroupIndex = rowGroupIndex;
  this.batchSize = batchSize;
  this.footer = footer;
  this.fragmentContext = fragmentContext;
  setColumns(columns);
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 19, Source: ParquetRecordReader.java

Example 3: OrderedPartitionRecordBatch

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public OrderedPartitionRecordBatch(OrderedPartitionSender pop, RecordBatch incoming, FragmentContext context) throws OutOfMemoryException {
  super(pop, context);
  this.incoming = incoming;
  this.partitions = pop.getDestinations().size();
  this.sendingMajorFragmentWidth = pop.getSendingWidth();
  this.recordsToSample = pop.getRecordsToSample();
  this.samplingFactor = pop.getSamplingFactor();
  this.completionFactor = pop.getCompletionFactor();

  DistributedCache cache = null;
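  // NOTE: cache is still null at this point, so the getMultiMap/getMap calls
  // below will throw a NullPointerException; this constructor appears to be
  // dead code in the source project and is preserved here as-is.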
  this.mmap = cache.getMultiMap(MULTI_CACHE_CONFIG);
  this.tableMap = cache.getMap(SINGLE_CACHE_CONFIG);
  Preconditions.checkNotNull(tableMap);

  this.mapKey = String.format("%s_%d", context.getHandle().getQueryId(), context.getHandle().getMajorFragmentId());
  this.minorFragmentSampleCount = cache.getCounter(mapKey);

  SchemaPath outputPath = popConfig.getRef();
  MaterializedField outputField = MaterializedField.create(outputPath, Types.required(TypeProtos.MinorType.INT));
  this.partitionKeyVector = (IntVector) TypeHelper.getNewVector(outputField, oContext.getAllocator());

}
 
Developer ID: skhalifa, Project: QDrill, Lines: 23, Source: OrderedPartitionRecordBatch.java

Example 4: setupRootFragment

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
/**
 * Set up the root fragment (which will run locally), and submit it for execution.
 *
 * @param rootFragment the plan fragment to execute on this node
 * @param rootOperator the root operator of the fragment's operator tree
 * @throws ExecutionSetupException if the fragment context or executor cannot be created
 */
private void setupRootFragment(final PlanFragment rootFragment, final FragmentRoot rootOperator)
    throws ExecutionSetupException {
  @SuppressWarnings("resource")
  final FragmentContext rootContext = new FragmentContext(drillbitContext, rootFragment, queryContext,
      initiatingClient, drillbitContext.getFunctionImplementationRegistry());
  @SuppressWarnings("resource")
  final IncomingBuffers buffers = new IncomingBuffers(rootFragment, rootContext);
  rootContext.setBuffers(buffers);

  queryManager.addFragmentStatusTracker(rootFragment, true);

  final ControlTunnel tunnel = drillbitContext.getController().getTunnel(queryContext.getCurrentEndpoint());
  final FragmentExecutor rootRunner = new FragmentExecutor(rootContext, rootFragment,
      new FragmentStatusReporter(rootContext, tunnel),
      rootOperator);
  final RootFragmentManager fragmentManager = new RootFragmentManager(rootFragment.getHandle(), buffers, rootRunner);

  if (buffers.isDone()) {
    // if we don't have to wait for any incoming data, start the fragment runner.
    bee.addFragmentRunner(fragmentManager.getRunnable());
  } else {
    // if we do, record the fragment manager in the workBus.
    drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
  }
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 33, Source: Foreman.java

Example 5: testJoinBatchSize

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
@Test
public void testJoinBatchSize(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  new NonStrictExpectations() {{
    bitContext.getMetrics(); result = new MetricRegistry();
    bitContext.getAllocator(); result = RootAllocatorFactory.newRoot(c);
    bitContext.getConfig(); result = c;
    bitContext.getOperatorCreatorRegistry(); result = new OperatorCreatorRegistry(c);
    bitContext.getCompiler(); result = CodeCompiler.getTestCompiler(c);
  }};

  final PhysicalPlanReader reader = new PhysicalPlanReader(c, c.getMapper(), CoordinationProtos.DrillbitEndpoint.getDefaultInstance());
  final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile("/join/join_batchsize.json"), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  exec.next(); // skip schema batch
  while (exec.next()) {
    assertEquals(100, exec.getRecordCount());
  }

  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  assertFalse(context.isFailed());
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 26, Source: TestMergeJoin.java

Example 6: getRecordWriter

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
@Override
public RecordWriter getRecordWriter(FragmentContext context, EasyWriter writer) throws IOException {
  Map<String, String> options = Maps.newHashMap();

  options.put("location", writer.getLocation());

  FragmentHandle handle = context.getHandle();
  String fragmentId = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
  options.put("prefix", fragmentId);

  options.put("separator", ((ModelFormatConfig)getConfig()).getDelimiter());
  options.put(FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig)writer.getStorageConfig()).connection);

  options.put("extension", ((ModelFormatConfig)getConfig()).getExtensions().get(0));

  RecordWriter recordWriter = new DrillModelWriter(/*context.getAllocator()*/);
  recordWriter.init(options);

  return recordWriter;
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 21, Source: ModelFormatPlugin.java

Example 7: setupHashJoinProbe

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
@Override
public void setupHashJoinProbe(FragmentContext context, VectorContainer buildBatch, RecordBatch probeBatch,
                               int probeRecordCount, HashJoinBatch outgoing, HashTable hashTable,
                               HashJoinHelper hjHelper, JoinRelType joinRelType) {

  this.probeBatch = probeBatch;
  this.probeSchema = probeBatch.getSchema();
  this.buildBatch = buildBatch;
  this.joinType = joinRelType;
  this.recordsToProcess = probeRecordCount;
  this.hashTable = hashTable;
  this.hjHelper = hjHelper;
  this.outgoingJoinBatch = outgoing;

  doSetup(context, buildBatch, probeBatch, outgoing);
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 17, Source: HashJoinProbeTemplate.java

Example 8: verifyLimitCount

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
private void verifyLimitCount(DrillbitContext bitContext, UserServer.UserClientConnection connection, String testPlan, int expectedCount) throws Throwable {
  final PhysicalPlanReader reader = new PhysicalPlanReader(c, c.getMapper(), CoordinationProtos.DrillbitEndpoint.getDefaultInstance());
  final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile("/limit/" + testPlan), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  int recordCount = 0;
  while (exec.next()) {
    recordCount += exec.getRecordCount();
  }

  assertEquals(expectedCount, recordCount);

  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }

  assertFalse(context.isFailed());
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 20, Source: TestSimpleLimit.java

Example 9: HiveTextRecordReader

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public HiveTextRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns, FragmentContext context) throws ExecutionSetupException {
  super(table, partition, inputSplit, projectedColumns, context, null);
  String d = table.getSd().getSerdeInfo().getParameters().get("field.delim");
  if (d != null) {
    delimiter = d.getBytes()[0];
  } else {
    delimiter = (byte) 1; // Hive's default field delimiter is \001 (Ctrl-A)
  }
  assert delimiter > 0;
  List<Integer> ids = Lists.newArrayList();
  for (int i = 0; i < tableColumns.size(); i++) {
    if (selectedColumnNames.contains(tableColumns.get(i))) {
      ids.add(i);
    }
  }
  columnIds = ids;
  numCols = tableColumns.size();
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 19, Source: HiveTextRecordReader.java

Example 10: getRootExec

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
/** Create RootExec and its children (RecordBatches) for given FragmentRoot */
private RootExec getRootExec(final FragmentRoot root, final FragmentContext context) throws ExecutionSetupException {
  final List<RecordBatch> childRecordBatches = getChildren(root, context);

  if (context.isImpersonationEnabled()) {
    final UserGroupInformation proxyUgi = ImpersonationUtil.createProxyUgi(root.getUserName(), context.getQueryUserName());
    try {
      return proxyUgi.doAs(new PrivilegedExceptionAction<RootExec>() {
        @Override
        public RootExec run() throws Exception {
          return ((RootCreator<PhysicalOperator>) getOpCreator(root, context)).getRoot(context, root, childRecordBatches);
        }
      });
    } catch (InterruptedException | IOException e) {
      final String errMsg = String.format("Failed to create RootExec for operator with id '%d'", root.getOperatorId());
      logger.error(errMsg, e);
      throw new ExecutionSetupException(errMsg, e);
    }
  } else {
    return ((RootCreator<PhysicalOperator>) getOpCreator(root, context)).getRoot(context, root, childRecordBatches);
  }
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 23, Source: ImplCreator.java

Example 11: HiveRecordReader

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public HiveRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns,
                        FragmentContext context, Map<String, String> hiveConfigOverride) throws ExecutionSetupException {
  this.table = table;
  this.partition = partition;
  this.inputSplit = inputSplit;
  this.empty = (inputSplit == null && partition == null);
  this.hiveConfigOverride = hiveConfigOverride;
  this.fragmentContext = context;
  this.managedBuffer = fragmentContext.getManagedBuffer().reallocIfNeeded(256);
  setColumns(projectedColumns);
  init();
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 13, Source: HiveRecordReader.java

Example 12: createNewSorter

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public static Sorter createNewSorter(FragmentContext context, List<Ordering> orderings, VectorAccessible batch) throws ClassTransformationException, IOException, SchemaChangeException {
  final MappingSet mainMapping = new MappingSet((String) null, null, ClassGenerator.DEFAULT_CONSTANT_MAP, ClassGenerator.DEFAULT_SCALAR_MAP);
  final MappingSet leftMapping = new MappingSet("leftIndex", null, ClassGenerator.DEFAULT_CONSTANT_MAP, ClassGenerator.DEFAULT_SCALAR_MAP);
  final MappingSet rightMapping = new MappingSet("rightIndex", null, ClassGenerator.DEFAULT_CONSTANT_MAP, ClassGenerator.DEFAULT_SCALAR_MAP);

  return createNewSorter(context, orderings, batch, mainMapping, leftMapping, rightMapping);
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 8, Source: SortBatch.java

Example 13: setup

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public void setup(FragmentContext context, SelectionVector2 vector2, RecordBatch incoming) throws SchemaChangeException {
  Preconditions.checkNotNull(vector2);
  this.vector2 = vector2;
  try {
    doSetup(context, incoming, null);
  } catch (IllegalStateException e) {
    throw new SchemaChangeException(e);
  }
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 10, Source: SingleBatchSorterTemplate.java

Example 14: UnlimitedRawBatchBuffer

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public UnlimitedRawBatchBuffer(FragmentContext context, int fragmentCount, int oppositeId) {
  super(context, fragmentCount);
  this.softlimit = bufferSizePerSocket * fragmentCount;
  this.startlimit = Math.max(softlimit/2, 1);
  logger.trace("softLimit: {}, startLimit: {}", softlimit, startlimit);
  this.bufferQueue = new UnlimitedBufferQueue();
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 8, Source: UnlimitedRawBatchBuffer.java

Example 15: SpoolingRawBatchBuffer

import org.apache.drill.exec.ops.FragmentContext; // import the required package/class
public SpoolingRawBatchBuffer(FragmentContext context, int fragmentCount, int oppositeId, int bufferIndex) throws IOException, OutOfMemoryException {
  super(context, fragmentCount);
  this.allocator = context.getNewChildAllocator(ALLOCATOR_INITIAL_RESERVATION, ALLOCATOR_MAX_RESERVATION, true);
  this.threshold = context.getConfig().getLong(ExecConstants.SPOOLING_BUFFER_MEMORY);
  this.oppositeId = oppositeId;
  this.bufferIndex = bufferIndex;
  this.bufferQueue = new SpoolingBufferQueue();
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 9, Source: SpoolingRawBatchBuffer.java


Note: The org.apache.drill.exec.ops.FragmentContext class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.