This article collects typical usage examples of the Java class org.apache.drill.exec.ExecConstants. If you are unsure what the ExecConstants class does or how to use it, the curated examples below should help.
The ExecConstants class belongs to the org.apache.drill.exec package. Fifteen code examples of the class follow, sorted by popularity by default.
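Most of the examples below follow one of two access patterns: an ExecConstants field supplies the key for a boot-time setting read through DrillConfig, or the name/validator of a system/session option read through an OptionManager. A minimal sketch of both patterns, assuming an OptionManager named optionManager and a DrillConfig named drillConfig are already in scope:

import org.apache.drill.exec.ExecConstants;

// session/system option, looked up by its ExecConstants option name (see Example 5)
long maxWidthPerNode = optionManager.getOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY).num_val;

// boot-time configuration value, looked up by its ExecConstants config path (see Example 4)
String tempFileSystem = drillConfig.getString(ExecConstants.TEMP_FILESYSTEM);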
Example 1: alterSessionOption
import org.apache.drill.exec.ExecConstants; // import the required class
@Test
public void alterSessionOption() throws Exception {
  newTest()
      .sqlQuery("select bool_val as bool from sys.options where name = '%s' order by type desc", ExecConstants.JSON_ALL_TEXT_MODE)
      .baselineColumns("bool")
      .ordered()
      .baselineValues(false)
      .go();
  test("alter session set `%s` = true", ExecConstants.JSON_ALL_TEXT_MODE);
  newTest()
      .sqlQuery("select bool_val as bool from sys.options where name = '%s' order by type desc", ExecConstants.JSON_ALL_TEXT_MODE)
      .baselineColumns("bool")
      .ordered()
      .baselineValues(false)
      .baselineValues(true)
      .go();
}
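After the ALTER SESSION, the second query returns two rows because sys.options now lists both the unchanged SYSTEM-level default (false) and the new SESSION-level override (true); the order by type desc clause makes the row order deterministic for the baseline.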
Example 2: onMatch
import org.apache.drill.exec.ExecConstants; // import the required class
@Override
public void onMatch(RelOptRuleCall call) {
  final DrillWriterRel writer = call.rel(0);
  final RelNode input = call.rel(1);
  final List<Integer> keys = writer.getPartitionKeys();
  final RelCollation collation = getCollation(keys);
  final boolean hashDistribute = PrelUtil.getPlannerSettings(call.getPlanner()).getOptions()
      .getOption(ExecConstants.CTAS_PARTITIONING_HASH_DISTRIBUTE_VALIDATOR);
  final RelTraitSet traits = hashDistribute
      ? input.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(collation).plus(getDistribution(keys))
      : input.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(collation);
  final RelNode convertedInput = convert(input, traits);
  if (!new WriteTraitPull(call).go(writer, convertedInput)) {
    DrillWriterRelBase newWriter = new WriterPrel(writer.getCluster(), convertedInput.getTraitSet(),
        convertedInput, writer.getCreateTableEntry());
    call.transformTo(newWriter);
  }
}
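When the CTAS hash-distribution option is enabled, the required traits include a hash distribution on the partition keys in addition to the collation, so all rows of a partition are routed to the same writer fragment; otherwise only the sort order is requested.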
Example 3: computeSelfCost
import org.apache.drill.exec.ExecConstants; // import the required class
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    // We use a multiplier of 0.05 for the TopN operator and 0.1 for Sort, to make TopN the preferred choice.
    return super.computeSelfCost(planner).multiplyBy(.1);
  }
  RelNode child = this.getInput();
  double inputRows = RelMetadataQuery.getRowCount(child);
  // int rowWidth = child.getRowType().getPrecision();
  int numSortFields = this.collation.getFieldCollations().size();
  double cpuCost = DrillCostBase.COMPARE_CPU_COST * numSortFields * inputRows * (Math.log(inputRows) / Math.log(2));
  double diskIOCost = 0; // assume in-memory for now, until operator-level memory constraints are enforced
  // TODO: use rowWidth instead of avgFieldWidth * numFields
  // memory cost: avgFieldWidth * numFields * inputRows
  double numFields = this.getRowType().getFieldCount();
  long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).num_val;
  double memCost = fieldWidth * numFields * inputRows;
  DrillCostFactory costFactory = (DrillCostFactory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0, memCost);
}
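The CPU term is the standard n*log2(n) comparison cost scaled by the number of sort keys, and the memory term estimates the in-memory footprint as average field width x field count x row count. As a worked example with hypothetical numbers: for 1,000,000 input rows and two sort keys, log2(1,000,000) is about 19.9, so cpuCost is roughly COMPARE_CPU_COST x 2 x 1,000,000 x 19.9.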
Example 4: initSpooler
import org.apache.drill.exec.ExecConstants; // import the required class
private synchronized void initSpooler() throws IOException {
  if (spooler != null) {
    return;
  }
  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, context.getConfig().getString(ExecConstants.TEMP_FILESYSTEM));
  conf.set(DRILL_LOCAL_IMPL_STRING, LocalSyncableFileSystem.class.getName());
  fs = FileSystem.get(conf);
  path = getPath();
  outputStream = fs.create(path);
  final String spoolingThreadName = QueryIdHelper.getExecutorThreadName(context.getHandle())
      .concat(":Spooler-" + oppositeId + "-" + bufferIndex);
  spooler = new Spooler(spoolingThreadName);
  spooler.start();
}
Example 5: setupSortMemoryAllocations
import org.apache.drill.exec.ExecConstants; // import the required class
private void setupSortMemoryAllocations(final PhysicalPlan plan) {
  // look for external sorts
  final List<ExternalSort> sortList = new LinkedList<>();
  for (final PhysicalOperator op : plan.getSortedOperators()) {
    if (op instanceof ExternalSort) {
      sortList.add((ExternalSort) op);
    }
  }
  // if there are any sorts, compute the maximum allocation and set it on them
  if (sortList.size() > 0) {
    final OptionManager optionManager = queryContext.getOptions();
    final long maxWidthPerNode = optionManager.getOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY).num_val;
    long maxAllocPerNode = Math.min(DrillConfig.getMaxDirectMemory(),
        queryContext.getConfig().getLong(ExecConstants.TOP_LEVEL_MAX_ALLOC));
    maxAllocPerNode = Math.min(maxAllocPerNode,
        optionManager.getOption(ExecConstants.MAX_QUERY_MEMORY_PER_NODE_KEY).num_val);
    final long maxSortAlloc = maxAllocPerNode / (sortList.size() * maxWidthPerNode);
    logger.debug("Max sort alloc: {}", maxSortAlloc);
    for (final ExternalSort externalSort : sortList) {
      externalSort.setMaxAllocation(maxSortAlloc);
    }
  }
}
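The division spreads the node-wide memory ceiling evenly across every sort instance that might run concurrently (number of sorts x maximum fragment width per node). With hypothetical numbers: if maxAllocPerNode is 8 GiB, the plan contains 2 external sorts, and maxWidthPerNode is 4, each sort receives 8 GiB / (2 x 4) = 1 GiB.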
Example 6: TraceRecordBatch
import org.apache.drill.exec.ExecConstants; // import the required class
public TraceRecordBatch(Trace pop, RecordBatch incoming, FragmentContext context) throws ExecutionSetupException {
  super(pop, context, incoming);
  this.traceTag = pop.traceTag;
  logLocation = context.getConfig().getString(ExecConstants.TRACE_DUMP_DIRECTORY);
  String fileName = getFileName();
  /* Create the log file we will dump to and initialize the file descriptors */
  try {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, context.getConfig().getString(ExecConstants.TRACE_DUMP_FILESYSTEM));
    FileSystem fs = FileSystem.get(conf);
    /* create the file */
    fos = fs.create(new Path(fileName));
  } catch (IOException e) {
    throw new ExecutionSetupException("Unable to create file: " + fileName + "; check permissions and that the directory exists", e);
  }
}
Example 7: ExternalSortBatch
import org.apache.drill.exec.ExecConstants; // import the required class
public ExternalSortBatch(ExternalSort popConfig, FragmentContext context, RecordBatch incoming) throws OutOfMemoryException {
  super(popConfig, context, true);
  this.incoming = incoming;
  DrillConfig config = context.getConfig();
  Configuration conf = new Configuration();
  conf.set("fs.default.name", config.getString(ExecConstants.EXTERNAL_SORT_SPILL_FILESYSTEM));
  try {
    this.fs = FileSystem.get(conf);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  SPILL_BATCH_GROUP_SIZE = config.getInt(ExecConstants.EXTERNAL_SORT_SPILL_GROUP_SIZE);
  SPILL_THRESHOLD = config.getInt(ExecConstants.EXTERNAL_SORT_SPILL_THRESHOLD);
  dirs = Iterators.cycle(config.getStringList(ExecConstants.EXTERNAL_SORT_SPILL_DIRS));
  copierAllocator = oContext.getAllocator().getChildAllocator(
      context, PriorityQueueCopier.INITIAL_ALLOCATION, PriorityQueueCopier.MAX_ALLOCATION, true);
  FragmentHandle handle = context.getHandle();
  fileName = String.format("%s/major_fragment_%s/minor_fragment_%s/operator_%s",
      QueryIdHelper.getQueryId(handle.getQueryId()),
      handle.getMajorFragmentId(), handle.getMinorFragmentId(), popConfig.getOperatorId());
}
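Guava's Iterators.cycle wraps the configured spill directories in an endless round-robin iterator, so successive spill files are spread across all of them; the fileName template encodes the query id plus major fragment, minor fragment, and operator ids so that concurrently spilling operators never collide on a path.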
Example 8: MongoRecordReader
import org.apache.drill.exec.ExecConstants; // import the required class
public MongoRecordReader(
    MongoSubScan.MongoSubScanSpec subScanSpec,
    List<SchemaPath> projectedColumns,
    FragmentContext context,
    MongoStoragePlugin plugin) {
  fields = new BasicDBObject();
  // exclude the _id field unless the user explicitly asks for it
  fields.put(DrillMongoConstants.ID, Integer.valueOf(0));
  setColumns(projectedColumns);
  fragmentContext = context;
  this.plugin = plugin;
  filters = new BasicDBObject();
  Map<String, List<BasicDBObject>> mergedFilters = MongoUtils.mergeFilters(
      subScanSpec.getMinFilters(), subScanSpec.getMaxFilters());
  buildFilters(subScanSpec.getFilter(), mergedFilters);
  enableAllTextMode = fragmentContext.getOptions().getOption(ExecConstants.MONGO_ALL_TEXT_MODE).bool_val;
  readNumbersAsDouble = fragmentContext.getOptions().getOption(ExecConstants.MONGO_READER_READ_NUMBERS_AS_DOUBLE).bool_val;
  init(subScanSpec);
}
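Putting a field with value 0 into a MongoDB projection document excludes that field from the results, so this reader suppresses _id by default; the two ExecConstants options mirror the JSON reader's all-text and read-numbers-as-double modes for documents stored in MongoDB.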
Example 9: createNewTable
import org.apache.drill.exec.ExecConstants; // import the required class
@Override
public CreateTableEntry createNewTable(String tableName, List<String> partitionColumns) {
  String storage = schemaConfig.getOption(ExecConstants.OUTPUT_FORMAT_OPTION).string_val;
  FormatPlugin formatPlugin = plugin.getFormatPlugin(storage);
  if (formatPlugin == null) {
    throw new UnsupportedOperationException(
        String.format("Unsupported format '%s' in workspace '%s'", storage,
            Joiner.on(".").join(getSchemaPath())));
  }
  return new FileSystemCreateTableEntry(
      (FileSystemConfig) plugin.getConfig(),
      formatPlugin,
      config.getLocation() + Path.SEPARATOR + tableName,
      partitionColumns);
}
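The writer format is resolved from the ExecConstants.OUTPUT_FORMAT_OPTION session setting (Drill's store.format option), so changing that option with ALTER SESSION switches the on-disk format of subsequently created tables.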
Example 10: trainNewModel
import org.apache.drill.exec.ExecConstants; // import the required class
@Override
public CreateTableEntry trainNewModel(String modelName, List<String> partitionColumns) {
  String storage = schemaConfig.getOption(ExecConstants.OUTPUT_FORMAT_OPTION).string_val;
  FormatPlugin formatPlugin = plugin.getFormatPlugin(storage);
  if (formatPlugin == null) {
    throw new UnsupportedOperationException(
        String.format("Unsupported format '%s' in workspace '%s'", storage,
            Joiner.on(".").join(getSchemaPath())));
  }
  return new FileSystemTrainModelEntry(
      (FileSystemConfig) plugin.getConfig(),
      formatPlugin,
      config.getLocation() + Path.SEPARATOR + modelName,
      partitionColumns);
}
Example 11: JSONRecordReader
import org.apache.drill.exec.ExecConstants; // import the required class
private JSONRecordReader(final FragmentContext fragmentContext, final String inputPath,
    final JsonNode embeddedContent, final DrillFileSystem fileSystem,
    final List<SchemaPath> columns) {
  Preconditions.checkArgument(
      (inputPath == null && embeddedContent != null) ||
      (inputPath != null && embeddedContent == null),
      "One of inputPath or embeddedContent must be set, but not both.");
  if (inputPath != null) {
    this.hadoopPath = new Path(inputPath);
  } else {
    this.embeddedContent = embeddedContent;
  }
  this.fileSystem = fileSystem;
  this.fragmentContext = fragmentContext;
  // only enable all-text mode if we aren't using embedded content mode
  this.enableAllTextMode = embeddedContent == null
      && fragmentContext.getOptions().getOption(ExecConstants.JSON_READER_ALL_TEXT_MODE_VALIDATOR);
  this.readNumbersAsDouble = fragmentContext.getOptions().getOption(ExecConstants.JSON_READ_NUMBERS_AS_DOUBLE).bool_val;
  setColumns(columns);
}
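The Preconditions.checkArgument call enforces an exclusive-or between the two input sources: the reader operates either on a file path or on embedded JSON content, never both, and all-text mode is deliberately suppressed in the embedded case.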
Example 12: getRecordWriter
import org.apache.drill.exec.ExecConstants; // import the required class
@Override
public RecordWriter getRecordWriter(FragmentContext context, EasyWriter writer) throws IOException {
  Map<String, String> options = Maps.newHashMap();
  options.put("location", writer.getLocation());
  FragmentHandle handle = context.getHandle();
  String fragmentId = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
  options.put("prefix", fragmentId);
  options.put("separator", " ");
  options.put(FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).connection);
  options.put("extension", "json");
  options.put("extended", Boolean.toString(context.getOptions().getOption(ExecConstants.JSON_EXTENDED_TYPES)));
  RecordWriter recordWriter = new JsonRecordWriter();
  recordWriter.init(options);
  return recordWriter;
}
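The writer is configured purely through a string-to-string option map; the "extended" entry forwards the ExecConstants.JSON_EXTENDED_TYPES option, which controls whether the writer emits extended JSON type annotations for values that plain JSON cannot represent.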
Example 13: verifyZkStore
import org.apache.drill.exec.ExecConstants; // import the required class
@Test
public void verifyZkStore() throws Exception {
  DrillConfig config = getConfig();
  String connect = config.getString(ExecConstants.ZK_CONNECTION);
  CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder()
      .namespace(config.getString(ExecConstants.ZK_ROOT))
      .retryPolicy(new RetryNTimes(1, 100))
      .connectionTimeoutMs(config.getInt(ExecConstants.ZK_TIMEOUT))
      .connectString(connect);
  try (CuratorFramework curator = builder.build()) {
    curator.start();
    ZkPStoreProvider provider = new ZkPStoreProvider(config, curator);
    PStoreTestUtil.test(provider);
  }
}
Example 14: testSimilar
import org.apache.drill.exec.ExecConstants; // import the required class
@Test
public void testSimilar() throws Exception {
  String query = "select n_nationkey " +
      "from cp.`tpch/nation.parquet` " +
      "where n_name similar to 'CHINA' " +
      "order by n_regionkey";
  testBuilder()
      .sqlQuery(query)
      .unOrdered()
      .optionSettingQueriesForTestQuery("alter session set `planner.slice_target` = 1")
      .baselineColumns("n_nationkey")
      .baselineValues(18)
      .go();
  // reset the option to its default value
  test("alter session set `planner.slice_target` = " + ExecConstants.SLICE_TARGET_DEFAULT);
}
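Lowering planner.slice_target to 1 forces the planner to parallelize even this tiny query, which is what the test exercises; the final statement resets the option to ExecConstants.SLICE_TARGET_DEFAULT so later tests in the suite are not affected.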
Example 15: startSomeDrillbits
import org.apache.drill.exec.ExecConstants; // import the required class
@BeforeClass
public static void startSomeDrillbits() throws Exception {
  // turn off the HTTP server to avoid port conflicts between the drillbits
  System.setProperty(ExecConstants.HTTP_ENABLE, "false");
  // turn on error for failure in cancelled fragments
  zkHelper = new ZookeeperHelper(true);
  zkHelper.startZookeeper(1);
  // use a non-null service set so that the drillbits can use port hunting
  remoteServiceSet = RemoteServiceSet.getLocalServiceSet();
  // create name-addressable drillbits
  startDrillbit(DRILLBIT_ALPHA, remoteServiceSet);
  startDrillbit(DRILLBIT_BETA, remoteServiceSet);
  startDrillbit(DRILLBIT_GAMMA, remoteServiceSet);
  // create a client
  final DrillConfig drillConfig = zkHelper.getConfig();
  drillClient = QueryTestUtil.createClient(drillConfig, remoteServiceSet, 1, null);
  clearAllInjections();
}