This page collects typical usage examples of the Java class org.apache.calcite.rel.RelNode. If you are wondering what the RelNode class is for, how to use it, or what real-world usage looks like, the curated examples below may help.
The RelNode class belongs to the org.apache.calcite.rel package. Fifteen code examples of the class are shown below, sorted by popularity by default.
Example 1: FilterRelBase
import org.apache.calcite.rel.RelNode; // import the required package/class
protected FilterRelBase(Convention convention, RelOptCluster cluster, RelTraitSet traits, RelNode child, RexNode condition) {
  super(cluster, traits, child, condition);
  assert getConvention() == convention;
  // save the number of conjuncts that make up the filter condition such
  // that repeated calls to the costing function can use the saved copy
  conjunctions = RelOptUtil.conjunctions(condition);
  numConjuncts = conjunctions.size();
  // assert numConjuncts >= 1;
  this.hasContains = ContainsRexVisitor.hasContainsCheckOrigin(this, this.getCondition(), -1);
  boolean foundFlatten = false;
  for (RexNode rex : this.getChildExps()) {
    MoreRelOptUtil.FlattenRexVisitor visitor = new MoreRelOptUtil.FlattenRexVisitor();
    if (rex.accept(visitor)) {
      foundFlatten = true;
    }
  }
  this.hasFlatten = foundFlatten;
}
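As a side note, RelOptUtil.conjunctions (used above to count the filter's conjuncts) flattens the top-level AND of a condition into a list. Below is a minimal, self-contained sketch of that behavior; it assumes only Calcite core on the classpath, and the class name ConjunctionsSketch is made up for illustration.

import java.math.BigDecimal;
import java.util.List;

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;

public class ConjunctionsSketch {
  public static void main(String[] args) {
    // Build the condition $0 > 1 AND $1 < 2 by hand.
    JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    RexBuilder rexBuilder = new RexBuilder(typeFactory);
    RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);
    RexNode condition = rexBuilder.makeCall(SqlStdOperatorTable.AND,
        rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN,
            rexBuilder.makeInputRef(intType, 0),
            rexBuilder.makeExactLiteral(BigDecimal.ONE)),
        rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN,
            rexBuilder.makeInputRef(intType, 1),
            rexBuilder.makeExactLiteral(BigDecimal.valueOf(2))));
    // conjunctions() splits the AND into its top-level operands.
    List<RexNode> conjunctions = RelOptUtil.conjunctions(condition);
    System.out.println(conjunctions.size()); // prints 2
  }
}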
Example 2: DremioRelOptMaterialization
import org.apache.calcite.rel.RelNode; // import the required package/class
public DremioRelOptMaterialization(RelNode tableRel,
    RelNode queryRel,
    IncrementalUpdateSettings incrementalUpdateSettings,
    LayoutInfo layoutInfo,
    String materializationId,
    BatchSchema schema,
    long expirationTimestamp) {
  // Create a RelOptMaterialization by manually specifying the RelOptTable.
  // If type casting has occurred, the RelOptTable will reside in the first input of the table rel.
  super(tableRel, queryRel, tableRel.getTable() != null ? tableRel.getTable() : tableRel.getInput(0).getTable(), null);
  this.incrementalUpdateSettings = Preconditions.checkNotNull(incrementalUpdateSettings);
  this.materializationId = Preconditions.checkNotNull(materializationId);
  this.schema = schema;
  this.layoutInfo = Preconditions.checkNotNull(layoutInfo);
  this.expirationTimestamp = expirationTimestamp;
}
Example 3: go
import org.apache.calcite.rel.RelNode; // import the required package/class
boolean go(T n, RelNode candidateSet) throws E {
  if (!(candidateSet instanceof RelSubset)) {
    return false;
  }
  boolean transform = false;
  for (RelNode rel : ((RelSubset) candidateSet).getRelList()) {
    if (isPhysical(rel)) {
      RelNode newRel = RelOptRule.convert(candidateSet, rel.getTraitSet().plus(Prel.DRILL_PHYSICAL));
      RelNode out = convertChild(n, newRel);
      if (out != null) {
        call.transformTo(out);
        transform = true;
      }
    }
  }
  return transform;
}
Example 4: getPlan
import org.apache.calcite.rel.RelNode; // import the required package/class
private RelNode getPlan(final String queryString) {
  final AtomicReference<RelNode> physical = new AtomicReference<>(null);
  final SqlQuery query = new SqlQuery(queryString, DEFAULT_USERNAME);
  final Job job = getJobsService().submitJob(JobRequest.newBuilder()
      .setSqlQuery(query)
      .setQueryType(QueryType.ACCELERATOR_CREATE)
      .setDatasetPath(DatasetPath.NONE.toNamespaceKey())
      .setDatasetVersion(DatasetVersion.NONE)
      .build(), new NoOpJobStatusListener() {
        @Override
        public void planRelTransform(final PlannerPhase phase, final RelNode before, final RelNode after, final long millisTaken) {
          if (phase == PlannerPhase.PHYSICAL) {
            physical.set(after);
          }
          super.planRelTransform(phase, before, after, millisTaken);
        }
      });
  job.getData().truncate(1);
  return physical.get();
}
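The AtomicReference here is less about concurrency than about Java's capture rules: a local variable referenced from an anonymous listener must be effectively final, so the callback cannot assign to a plain RelNode local and instead writes into a mutable holder. A stripped-down sketch of just that pattern (names are illustrative):

import java.util.concurrent.atomic.AtomicReference;

public class CaptureSketch {
  public static void main(String[] args) {
    final AtomicReference<String> captured = new AtomicReference<>(null);
    Runnable listener = new Runnable() {
      @Override
      public void run() {
        captured.set("physical plan"); // mutate the holder, not the captured local
      }
    };
    listener.run();
    System.out.println(captured.get()); // prints "physical plan"
  }
}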
Example 5: onMatch
import org.apache.calcite.rel.RelNode; // import the required package/class
@Override
public void onMatch(RelOptRuleCall call) {
  PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
  if (!settings.isNestedLoopJoinEnabled()) {
    return;
  }
  final JoinRel join = (JoinRel) call.rel(0);
  final RelNode left = join.getLeft();
  final RelNode right = join.getRight();
  if (!checkPreconditions(join, left, right, settings)) {
    return;
  }
  try {
    if (checkBroadcastConditions(call.getPlanner(), join, left, right)) {
      createBroadcastPlan(call, join, join.getCondition(), PhysicalJoinType.NESTEDLOOP_JOIN,
          left, right, null /* left collation */, null /* right collation */);
    }
  } catch (InvalidRelException e) {
    tracer.warn(e.toString());
  }
}
Example 6: onMatch
import org.apache.calcite.rel.RelNode; // import the required package/class
@Override
public void onMatch(RelOptRuleCall call) {
  final LogicalUnion union = (LogicalUnion) call.rel(0);
  // This rule applies to Union-All only
  if (!union.all) {
    return;
  }
  final RelTraitSet traits = union.getTraitSet().plus(Rel.LOGICAL);
  final List<RelNode> convertedInputs = new ArrayList<>();
  for (RelNode input : union.getInputs()) {
    final RelNode convertedInput = convert(input, input.getTraitSet().plus(Rel.LOGICAL).simplify());
    convertedInputs.add(convertedInput);
  }
  try {
    call.transformTo(new UnionRel(union.getCluster(), traits, convertedInputs, union.all,
        true /* check compatibility */));
  } catch (InvalidRelException e) {
    tracer.warn(e.toString());
  }
}
Example 7: onMatch
import org.apache.calcite.rel.RelNode; // import the required package/class
@Override
public void onMatch(RelOptRuleCall call) {
  final LogicalAggregate aggregate = (LogicalAggregate) call.rel(0);
  final RelNode input = call.rel(1);
  if (aggregate.containsDistinctCall()) {
    // currently, don't use this rule if any of the aggregates contains DISTINCT
    return;
  }
  final RelTraitSet traits = aggregate.getTraitSet().plus(DrillRel.DRILL_LOGICAL);
  final RelNode convertedInput = convert(input, input.getTraitSet().plus(DrillRel.DRILL_LOGICAL));
  try {
    call.transformTo(new DrillAggregateRel(aggregate.getCluster(), traits, convertedInput, aggregate.indicator,
        aggregate.getGroupSet(), aggregate.getGroupSets(), aggregate.getAggCallList()));
  } catch (InvalidRelException e) {
    tracer.warning(e.toString());
  }
}
Example 8: QueryMetadata
import org.apache.calcite.rel.RelNode; // import the required package/class
public QueryMetadata(List<SqlIdentifier> ancestors,
    List<FieldOrigin> fieldOrigins, List<JoinInfo> joins, List<ParentDatasetInfo> parents,
    SqlNode sqlNode, RelDataType rowType,
    List<ParentDataset> grandParents, final RelOptCost cost, final PlanningSet planningSet,
    final RelNode serializableLogicalPlan,
    BatchSchema batchSchema) {
  this.rowType = rowType;
  this.ancestors = Optional.fromNullable(ancestors);
  this.fieldOrigins = Optional.fromNullable(fieldOrigins);
  this.joins = Optional.fromNullable(joins);
  this.parents = Optional.fromNullable(parents);
  this.sqlNode = Optional.fromNullable(sqlNode);
  this.grandParents = Optional.fromNullable(grandParents);
  this.cost = Optional.fromNullable(cost);
  this.planningSet = Optional.fromNullable(planningSet);
  this.serializableLogicalPlan = Optional.fromNullable(serializableLogicalPlan);
  this.batchSchema = batchSchema;
}
Example 9: computeSelfCost
import org.apache.calcite.rel.RelNode; // import the required package/class
/**
 * HashToRandomExchange processes M input rows and hash partitions them
 * based on computing a hash value on the distribution fields.
 * If there are N nodes (endpoints), we can assume for costing purposes
 * on average each sender will send M/N rows to 1 destination endpoint.
 * (See DrillCostBase for symbol notations)
 * Include impact of skewness of distribution: the more keys used, the less likely the distribution will be skewed.
 * The hash cpu cost will be proportional to 1 / #_keys.
 *   C = CPU cost of hashing k fields of M/N rows
 *     + CPU cost of SV remover for M/N rows
 *     + Network cost of sending M/N rows to 1 destination.
 * So, C = (h * 1/k * M/N) + (s * M/N) + (w * M/N)
 * Total cost = N * C
 */
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    return super.computeSelfCost(planner).multiplyBy(.1);
  }
  RelNode child = this.getInput();
  double inputRows = RelMetadataQuery.getRowCount(child);
  int rowWidth = child.getRowType().getFieldCount() * DrillCostBase.AVG_FIELD_WIDTH;
  double hashCpuCost = DrillCostBase.HASH_CPU_COST * inputRows / fields.size();
  double svrCpuCost = DrillCostBase.SVR_CPU_COST * inputRows;
  double networkCost = DrillCostBase.BYTE_NETWORK_COST * inputRows * rowWidth;
  DrillCostFactory costFactory = (DrillCostFactory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, hashCpuCost + svrCpuCost, 0, networkCost);
}
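To make the Javadoc formula concrete, the sketch below plugs in made-up numbers; the real constants live in DrillCostBase and are not reproduced here. It also shows that N * C collapses to (h/k + s + w) * M, which is why computeSelfCost above can work directly with the total row count rather than per-sender counts.

public class HashExchangeCostSketch {
  public static void main(String[] args) {
    double M = 1_000_000; // total input rows
    double N = 10;        // endpoints (senders)
    double k = 2;         // hash distribution keys
    double h = 0.05;      // hypothetical per-row hashing cost
    double s = 0.01;      // hypothetical per-row SV remover cost
    double w = 0.20;      // hypothetical per-row network cost
    double perSender = (h / k) * (M / N) + s * (M / N) + w * (M / N); // C
    double total = N * perSender;                                     // N * C
    System.out.println(total);               // ~235000
    System.out.println((h / k + s + w) * M); // same value, up to floating-point rounding
  }
}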
Example 10: visit
import org.apache.calcite.rel.RelNode; // import the required package/class
@Override
public RelNode visit(LogicalJoin join) {
  // to the best of my knowledge join.systemFieldList is always empty
  Preconditions.checkState(join.getSystemFieldList().isEmpty(), "join.systemFieldList is not empty!");
  final RelNode left = join.getLeft().accept(this);
  final RelNode right = join.getRight().accept(this);
  return new LogicalJoin(
      cluster,
      copyOf(join.getTraitSet()),
      left,
      right,
      copyOf(join.getCondition()),
      join.getVariablesSet(),
      join.getJoinType(),
      join.isSemiJoinDone(),
      ImmutableList.<RelDataTypeField>of()
  );
}
Example 11: computeSelfCost
import org.apache.calcite.rel.RelNode; // import the required package/class
/**
 * A SingleMergeExchange processes a total of M rows coming from N
 * sorted input streams (from N senders) and merges them into a single
 * output sorted stream. For costing purposes we can assume each sender
 * is sending M/N rows to a single receiver.
 * (See DrillCostBase for symbol notations)
 *   C = CPU cost of SV remover for M/N rows
 *     + Network cost of sending M/N rows to 1 destination.
 * So, C = (s * M/N) + (w * M/N)
 * Cost of merging M rows coming from N senders = (M log2 N) * c
 * Total cost = N * C + (M log2 N) * c
 */
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    return super.computeSelfCost(planner).multiplyBy(.1);
  }
  RelNode child = this.getInput();
  double inputRows = RelMetadataQuery.getRowCount(child);
  int rowWidth = child.getRowType().getFieldCount() * DrillCostBase.AVG_FIELD_WIDTH;
  double svrCpuCost = DrillCostBase.SVR_CPU_COST * inputRows;
  double networkCost = DrillCostBase.BYTE_NETWORK_COST * inputRows * rowWidth;
  int numEndPoints = PrelUtil.getSettings(getCluster()).numEndPoints();
  double mergeCpuCost = DrillCostBase.COMPARE_CPU_COST * inputRows * (Math.log(numEndPoints) / Math.log(2));
  DrillCostFactory costFactory = (DrillCostFactory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, svrCpuCost + mergeCpuCost, 0, networkCost);
}
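Compared to the previous exchange, the only new term is the merge cost (M log2 N) * c. Java's Math class has no log2, which is why the code uses Math.log(numEndPoints) / Math.log(2); a tiny illustration with made-up numbers (c is hypothetical):

public class MergeCostSketch {
  public static void main(String[] args) {
    double M = 1_000_000; // total rows arriving at the receiver
    int N = 8;            // sorted sender streams
    double c = 0.02;      // hypothetical per-comparison cpu cost
    double log2N = Math.log(N) / Math.log(2); // ~3.0 for N = 8
    System.out.println(M * log2N * c);        // merge term: ~60000
  }
}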
Example 12: JdbcPrel
import org.apache.calcite.rel.RelNode; // import the required package/class
public JdbcPrel(RelOptCluster cluster, RelTraitSet traitSet, JdbcIntermediatePrel prel) {
  super(cluster, traitSet);
  final RelNode input = prel.getInput();
  rows = input.getRows();
  convention = (DrillJdbcConvention) input.getTraitSet().getTrait(ConventionTraitDef.INSTANCE);
  // generate sql for tree.
  final SqlDialect dialect = convention.getPlugin().getDialect();
  final JdbcImplementor jdbcImplementor = new JdbcImplementor(
      dialect,
      (JavaTypeFactory) getCluster().getTypeFactory());
  final JdbcImplementor.Result result =
      jdbcImplementor.visitChild(0, input.accept(new SubsetRemover()));
  sql = result.asQuery().toSqlString(dialect).getSql();
  rowType = input.getRowType();
}
Example 13: planSubstituted
import org.apache.calcite.rel.RelNode; // import the required package/class
@Override
public void planSubstituted(DremioRelOptMaterialization materialization, List<RelNode> substitutions, RelNode target, long millisTaken) {
  try {
    // reflection was considered and matched
    if (!consideredReflections.containsKey(materialization.getLayoutId())) {
      final ReflectionState state = new ReflectionState(
          materialization.getMaterializationId(),
          materialization.getLayoutId(),
          !substitutions.isEmpty() // non-empty substitutions means that the reflection was matched at least once
      );
      consideredReflections.put(materialization.getLayoutId(), state);
    }
  } catch (Exception e) {
    logger.error("AccelerationDetails populator failed to handle planSubstituted()", e);
  }
}
Example 14: findSubstitutions
import org.apache.calcite.rel.RelNode; // import the required package/class
@Override
public List<Substitution> findSubstitutions(final RelNode query) {
  final List<RelOptMaterialization> materializations =
      SubstitutionUtils.findApplicableMaterializations(query, getMaterializations());
  final List<Substitution> substitutions = Lists.newArrayList(new Substitution(query, null));
  for (final RelOptMaterialization materialization : materializations) {
    final int count = substitutions.size();
    for (int i = 0; i < count; i++) {
      try {
        substitutions.addAll(substitute(substitutions.get(i).getReplacement(), materialization));
      } catch (final Throwable ex) {
        LOGGER.warn("unable to apply materialization: {}", materialization, ex);
      }
    }
  }
  // discard the original query
  return substitutions.subList(1, substitutions.size());
}
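The loop above is easy to misread, so here is a stripped-down sketch of just the accumulation pattern, with strings standing in for plans: index 0 holds the unchanged query as a seed, each materialization is applied to every substitution produced so far (the size snapshot keeps entries added in the current pass from being revisited), and the seed is discarded at the end.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SubstitutionAccumulationSketch {
  public static void main(String[] args) {
    List<String> substitutions = new ArrayList<>();
    substitutions.add("query"); // seed: the original, unsubstituted query
    for (String materialization : Arrays.asList("m1", "m2")) {
      int count = substitutions.size(); // snapshot taken before this materialization
      for (int i = 0; i < count; i++) {
        // stands in for substitute(substitutions.get(i).getReplacement(), materialization)
        substitutions.add(substitutions.get(i) + "+" + materialization);
      }
    }
    // discard the seed, as findSubstitutions does
    System.out.println(substitutions.subList(1, substitutions.size()));
    // prints [query+m1, query+m2, query+m1+m2]
  }
}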
Example 15: getJoinCategory
import org.apache.calcite.rel.RelNode; // import the required package/class
public static JoinCategory getJoinCategory(RelNode left, RelNode right, RexNode condition,
    List<Integer> leftKeys, List<Integer> rightKeys, List<Boolean> filterNulls) {
  if (condition.isAlwaysTrue()) {
    return JoinCategory.CARTESIAN;
  }
  leftKeys.clear();
  rightKeys.clear();
  filterNulls.clear();
  RexNode remaining = RelOptUtil.splitJoinCondition(left, right, condition, leftKeys, rightKeys, filterNulls);
  if (!remaining.isAlwaysTrue() || (leftKeys.size() == 0 || rightKeys.size() == 0)) {
    // for practical purposes these cases could be treated as inequality
    return JoinCategory.INEQUALITY;
  }
  return JoinCategory.EQUALITY;
}
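The three lists are output parameters: the method clears them and, for an equi-join, RelOptUtil.splitJoinCondition fills them with matching key ordinals. The fragment below sketches a hypothetical caller; the class names JoinCategorySketch and JoinUtils (standing for whatever class declares getJoinCategory and the JoinCategory enum) and the passed-in Join are placeholders, not part of the original code.

import java.util.ArrayList;
import java.util.List;

import org.apache.calcite.rel.core.Join;

public class JoinCategorySketch {
  // Hypothetical caller; `join` is whatever Join rel is at hand.
  static void classify(Join join) {
    List<Integer> leftKeys = new ArrayList<>();
    List<Integer> rightKeys = new ArrayList<>();
    List<Boolean> filterNulls = new ArrayList<>();
    JoinUtils.JoinCategory category = JoinUtils.getJoinCategory(join.getLeft(), join.getRight(),
        join.getCondition(), leftKeys, rightKeys, filterNulls);
    if (category == JoinUtils.JoinCategory.EQUALITY) {
      // leftKeys.get(i) and rightKeys.get(i) are the field ordinals of the i-th equi-join key pair;
      // filterNulls.get(i) records whether that comparison filters nulls ("=" vs "IS NOT DISTINCT FROM").
    }
  }
}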