本文整理汇总了Java中org.apache.calcite.sql.util.ChainedSqlOperatorTable类的典型用法代码示例。如果您正苦于以下问题:Java ChainedSqlOperatorTable类的具体用法?Java ChainedSqlOperatorTable怎么用?Java ChainedSqlOperatorTable使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
ChainedSqlOperatorTable类属于org.apache.calcite.sql.util包,在下文中一共展示了ChainedSqlOperatorTable类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: SqlConverter
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Wires up the Calcite-based SQL parsing and validation pipeline for a single
 * query: parser configuration, type factory, schema/catalog resolution,
 * operator table and validator.
 *
 * @param context per-query context supplying planner settings, the function
 *                registry, schemas, session and configuration
 */
public SqlConverter(QueryContext context) {
this.settings = context.getPlannerSettings();
this.util = context;
this.functions = context.getFunctionRegistry();
this.parserConfig = new DrillParserConfig(settings);
this.sqlToRelConverterConfig = new SqlToRelConverterConfig();
// Top-level conversion; nested sub-query converters would flip this flag.
this.isInnerQuery = false;
this.typeFactory = new JavaTypeFactoryImpl(DRILL_TYPE_SYSTEM);
this.defaultSchema = context.getNewDefaultSchema();
this.rootSchema = rootSchema(defaultSchema);
this.temporarySchema = context.getConfig().getString(ExecConstants.DEFAULT_TEMPORARY_WORKSPACE);
this.session = context.getSession();
this.drillConfig = context.getConfig();
// Catalog reader resolves table identifiers against the root schema,
// starting the search from the session's default schema path.
this.catalog = new DrillCalciteCatalogReader(
rootSchema,
parserConfig.caseSensitive(),
DynamicSchema.from(defaultSchema).path(null),
typeFactory,
drillConfig,
session);
// Operator lookup consults Drill's operator table first, then the catalog
// (functions registered in the schema).
this.opTab = new ChainedSqlOperatorTable(Arrays.asList(context.getDrillOperatorTable(), catalog));
// When default costing is requested the factory is left null — presumably
// so the planner falls back to its built-in cost model (verify in planner).
this.costFactory = (settings.useDefaultCosting()) ? null : new DrillCostBase.DrillCostFactory();
this.validator = new DrillValidator(opTab, catalog, typeFactory, SqlConformance.DEFAULT);
// Expand identifiers to fully-qualified form during validation.
validator.setIdentifierExpansion(true);
}
示例2: buildPlanner
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Creates a Calcite {@link Planner} configured for Hive-flavoured SQL:
 * double-quoted identifiers, unquoted names upper-cased, and an operator
 * table that resolves standard SQL first, then Hive extensions, then
 * functions exposed by the catalog reader.
 *
 * @param context query context providing the default schema
 * @return a planner built from the assembled framework configuration
 */
private Planner buildPlanner(QueryContext context) {
  // Convention + collation are the only trait definitions tracked.
  final List<RelTraitDef> supportedTraits = new ArrayList<RelTraitDef>();
  supportedTraits.add(ConventionTraitDef.INSTANCE);
  supportedTraits.add(RelCollationTraitDef.INSTANCE);

  final ChainedSqlOperatorTable operators =
      new ChainedSqlOperatorTable(
          ImmutableList.of(SqlStdOperatorTable.instance(),
              HiveSqlOperatorTable.instance(), catalogReader));

  final FrameworkConfig frameworkConfig = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.configBuilder()
          .setQuotedCasing(Casing.UNCHANGED)
          .setUnquotedCasing(Casing.TO_UPPER)
          .setQuoting(Quoting.DOUBLE_QUOTE)
          .build())
      .defaultSchema(context.getDefaultSchema())
      .operatorTable(operators)
      .traitDefs(supportedTraits)
      .convertletTable(StandardConvertletTable.INSTANCE)
      .programs(getPrograms())
      .typeSystem(RelDataTypeSystem.DEFAULT)
      .build();
  return Frameworks.getPlanner(frameworkConfig);
}
示例3: testTranslate3
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Verifies that TRANSLATE3 only resolves once the Oracle operator table is
 * chained in front of the standard table, and that arity/type errors for it
 * are still reported afterwards.
 */
@Test public void testTranslate3() {
// TRANSLATE3 is not in the standard operator table
checkWholeExpFails("translate('aabbcc', 'ab', '+-')",
"No match found for function signature TRANSLATE3\\(<CHARACTER>, <CHARACTER>, <CHARACTER>\\)");
// Chain Oracle's operator table ahead of the standard one; TRANSLATE3 now resolves.
tester = tester.withOperatorTable(
ChainedSqlOperatorTable.of(OracleSqlOperatorTable.instance(),
SqlStdOperatorTable.instance()));
checkExpType("translate('aabbcc', 'ab', '+-')",
"VARCHAR(6) NOT NULL");
// Wrong arity (too few / too many args) and wrong argument types still fail.
checkWholeExpFails("translate('abc', 'ab')",
"Invalid number of arguments to function 'TRANSLATE3'. Was expecting 3 arguments");
checkWholeExpFails("translate('abc', 'ab', 123)",
"(?s)Cannot apply 'TRANSLATE3' to arguments of type 'TRANSLATE3\\(<CHAR\\(3\\)>, <CHAR\\(2\\)>, <INTEGER>\\)'\\. .*");
checkWholeExpFails("translate('abc', 'ab', '+-', 'four')",
"Invalid number of arguments to function 'TRANSLATE3'. Was expecting 3 arguments");
}
示例4: oracleTester
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Returns a tester whose operator table chains Oracle's operators in front of
 * the standard table, backed by a connection on the HR schema with the "fun"
 * property set to "oracle".
 */
protected SqlTester oracleTester() {
  return tester
      .withOperatorTable(
          // Oracle table first so Oracle-specific functions win lookup.
          ChainedSqlOperatorTable.of(
              OracleSqlOperatorTable.instance(),
              SqlStdOperatorTable.instance()))
      .withConnectionFactory(
          CalciteAssert.EMPTY_CONNECTION_FACTORY
              .with(
                  new CalciteAssert.AddSchemaSpecPostProcessor(
                      CalciteAssert.SchemaSpec.HR))
              .with("fun", "oracle"));
}
示例5: BeamQueryPlanner
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Builds the Calcite planner used to translate SQL over Beam tables and
 * indexes the schema's tables by name as Beam source tables.
 *
 * @param schema root schema whose tables become the planner's sources
 */
public BeamQueryPlanner(SchemaPlus schema) {
  // Default the Saffron/Calcite charset properties to UTF-16 once per JVM;
  // if the default charset was configured externally, leave all three alone.
  final String charsetProp = "saffron.default.charset";
  if (System.getProperty(charsetProp) == null) {
    System.setProperty(charsetProp, ConversionUtil.NATIVE_UTF16_CHARSET_NAME);
    System.setProperty("saffron.default.nationalcharset",
        ConversionUtil.NATIVE_UTF16_CHARSET_NAME);
    System.setProperty("saffron.default.collation.name",
        String.format("%s$%s", ConversionUtil.NATIVE_UTF16_CHARSET_NAME, "en_US"));
  }

  final List<RelTraitDef> planningTraits = new ArrayList<>();
  planningTraits.add(ConventionTraitDef.INSTANCE);
  planningTraits.add(RelCollationTraitDef.INSTANCE);

  // Standard operators first, then functions discoverable through the schema.
  final List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema), false,
      Collections.<String>emptyList(), TYPE_FACTORY));

  final FrameworkConfig frameworkConfig = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.configBuilder().setLex(Lex.MYSQL).build())
      .defaultSchema(schema)
      .traitDefs(planningTraits)
      .context(Contexts.EMPTY_CONTEXT)
      .ruleSets(BeamRuleSets.getRuleSets())
      .costFactory(null)
      .typeSystem(BeamRelDataTypeSystem.BEAM_REL_DATATYPE_SYSTEM)
      .operatorTable(new ChainedSqlOperatorTable(operatorTables))
      .build();
  this.planner = Frameworks.getPlanner(frameworkConfig);

  // Index every table in the schema by name for later source lookup.
  for (String tableName : schema.getTableNames()) {
    sourceTables.put(tableName, (BaseBeamTable) schema.getTable(tableName));
  }
}
示例6: buildFrameWorkConfig
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
* Method method build a calcite framework configuration for calcite to parse SQL and generate relational tree
* out of it.
* @return FrameworkConfig
*/
private FrameworkConfig buildFrameWorkConfig()
{
List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
sqlOperatorTables.add(SqlStdOperatorTable.instance());
sqlOperatorTables
.add(new CalciteCatalogReader(CalciteSchema.from(schema), false, Collections.<String>emptyList(), typeFactory));
return Frameworks.newConfigBuilder().defaultSchema(schema)
.parserConfig(SqlParser.configBuilder().setLex(Lex.MYSQL).build())
.operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables)).build();
}
示例7: buildFrameWorkConfig
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Builds the Calcite framework configuration. When user-defined functions are
 * present, the operator table chains the standard operators with the schema's
 * catalog reader so registered UDFs can be resolved.
 */
private FrameworkConfig buildFrameWorkConfig() {
  if (!hasUdf) {
    // No UDFs: the default operator table is sufficient.
    return Frameworks.newConfigBuilder().defaultSchema(schema).build();
  }
  final List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
      false,
      Collections.<String>emptyList(), typeFactory));
  return Frameworks.newConfigBuilder()
      .defaultSchema(schema)
      .operatorTable(new ChainedSqlOperatorTable(operatorTables))
      .build();
}
示例8: sqlOverDummyTable
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Parses, validates and converts {@code sql} over two in-memory streamable
 * tables (FOO and BAR, both with columns ID, NAME, ADDR) plus a scalar UDF
 * registered as MYPLUS, printing the resulting plan and returning the state.
 *
 * @param sql query text to plan
 * @return the schema and converted relational tree
 */
public static CalciteState sqlOverDummyTable(String sql)
    throws RelConversionException, ValidationException, SqlParseException {
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final JavaTypeFactory javaTypeFactory =
      new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

  // One three-column streamable table, registered under two names.
  final StreamableTable streamable = new CompilerUtil.TableBuilderInfo(javaTypeFactory)
      .field("ID", SqlTypeName.INTEGER)
      .field("NAME", javaTypeFactory.createType(String.class))
      .field("ADDR", javaTypeFactory.createType(String.class))
      .build();
  final Table streamTable = streamable.stream();
  rootSchema.add("FOO", streamTable);
  rootSchema.add("BAR", streamTable);
  rootSchema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));

  // Standard operators plus whatever the schema exposes (e.g. MYPLUS).
  final List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(new CalciteCatalogReader(CalciteSchema.from(rootSchema),
      false,
      Collections.<String>emptyList(), javaTypeFactory));
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .defaultSchema(rootSchema)
      .operatorTable(new ChainedSqlOperatorTable(operatorTables))
      .build();

  final Planner planner = Frameworks.getPlanner(config);
  final SqlNode parsed = planner.parse(sql);
  final SqlNode validated = planner.validate(parsed);
  final RelNode relTree = planner.convert(validated);
  System.out.println(RelOptUtil.toString(relTree, SqlExplainLevel.ALL_ATTRIBUTES));
  return new CalciteState(rootSchema, relTree);
}
示例9: createSqlValidator
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Creates the SQL validator for a statement. Operators come from the
 * connection's configured operator table (falling back to the standard table)
 * chained with the catalog reader, so schema-registered functions resolve too.
 *
 * @param context       connection context supplying config and type factory
 * @param catalogReader catalog used both for operator lookup and name resolution
 */
private SqlValidator createSqlValidator(Context context,
    CalciteCatalogReader catalogReader) {
  // Respect a "fun"-configured operator table if the connection set one.
  final SqlOperatorTable configuredOperators =
      context.config().fun(SqlOperatorTable.class,
          SqlStdOperatorTable.instance());
  final SqlOperatorTable operators =
      ChainedSqlOperatorTable.of(configuredOperators, catalogReader);
  final JavaTypeFactory typeFactory = context.getTypeFactory();
  final SqlConformance conformance = context.config().conformance();
  return new CalciteSqlValidator(operators, catalogReader, typeFactory,
      conformance);
}
示例10: createSqlValidator
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Builds the validator used for SQL statements: the connection-configured
 * operator table (standard table by default) is chained with the catalog
 * reader so both built-in and schema-registered functions are visible.
 */
private SqlValidator createSqlValidator(Context context,
    CalciteCatalogReader catalogReader) {
  final SqlOperatorTable baseTable = context.config()
      .fun(SqlOperatorTable.class, SqlStdOperatorTable.instance());
  // Chain the catalog reader after the base table for function resolution.
  return new CalciteSqlValidator(
      ChainedSqlOperatorTable.of(baseTable, catalogReader),
      catalogReader,
      context.getTypeFactory(),
      context.config().conformance());
}
示例11: testValidateUserDefinedAggregate
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Checks that a user-defined aggregate (MY_COUNT), registered via a
 * ListSqlOperatorTable chained after the standard table, is treated as an
 * aggregate during validation — and that normal aggregate-query rules
 * (grouping of non-aggregated columns) still apply.
 */
@Test public void testValidateUserDefinedAggregate() throws Exception {
final SqlStdOperatorTable stdOpTab = SqlStdOperatorTable.instance();
// Chain a one-entry operator table holding the UDA behind the standard table.
SqlOperatorTable opTab =
ChainedSqlOperatorTable.of(stdOpTab,
new ListSqlOperatorTable(
ImmutableList.<SqlOperator>of(new MyCountAggFunction())));
final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
final FrameworkConfig config = Frameworks.newConfigBuilder()
.defaultSchema(
CalciteAssert.addSchema(rootSchema, CalciteAssert.SchemaSpec.HR))
.operatorTable(opTab)
.build();
final Planner planner = Frameworks.getPlanner(config);
SqlNode parse =
planner.parse("select \"deptno\", my_count(\"empid\") from \"emps\"\n"
+ "group by \"deptno\"");
assertThat(Util.toLinux(parse.toString()),
equalTo("SELECT `deptno`, `MY_COUNT`(`empid`)\n"
+ "FROM `emps`\n"
+ "GROUP BY `deptno`"));
// MY_COUNT is recognized as an aggregate function, and therefore it is OK
// that its argument empid is not in the GROUP BY clause.
SqlNode validate = planner.validate(parse);
assertThat(validate, notNullValue());
// The presence of an aggregate function in the SELECT clause causes it
// to become an aggregate query. Non-aggregate expressions become illegal.
planner.close();
planner.reset();
parse = planner.parse("select \"deptno\", count(1) from \"emps\"");
try {
validate = planner.validate(parse);
fail("expected exception, got " + validate);
} catch (ValidationException e) {
// deptno is selected without being grouped, so validation must reject it.
assertThat(e.getCause().getCause().getMessage(),
containsString("Expression 'deptno' is not being grouped"));
}
}
示例12: SqlConverter
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Assembles the full SQL-to-rel conversion stack for one query attempt:
 * parser config, catalog, operator table, validator, materialization-aware
 * substitution provider and the Volcano planner/cluster.
 * Construction order matters: the catalog feeds the operator table and
 * validator, and the planner must exist before the cluster is created.
 */
public SqlConverter(
final PlannerSettings settings,
final SchemaPlus defaultSchema,
final SqlOperatorTable operatorTable,
final FunctionContext functionContext,
final MaterializationDescriptorProvider materializationProvider,
final FunctionImplementationRegistry functions,
final UserSession session,
final AttemptObserver observer,
final StoragePluginRegistry registry,
final SubstitutionProviderFactory factory
) {
this.nestingLevel = 0;
this.flattenCounter = new FlattenOpCounter();
this.observer = observer;
this.settings = settings;
this.functionContext = functionContext;
this.functions = functions;
this.session = Preconditions.checkNotNull(session, "user session is required");
this.parserConfig = ParserConfig.newInstance(session, settings);
// Top-level conversion; nested queries would set this flag.
this.isInnerQuery = false;
this.typeFactory = new JavaTypeFactoryImpl(TYPE_SYSTEM);
this.defaultSchema = defaultSchema;
this.rootSchema = rootSchema(defaultSchema);
// Catalog resolves identifiers from the default schema's path downward.
this.catalog = new CalciteCatalogReader(
CalciteSchema.from(rootSchema),
parserConfig.caseSensitive(),
CalciteSchema.from(defaultSchema).path(null),
typeFactory);
// set catalog for MaterializedViewTable to create a deserializer
settings.setCatalog(catalog);
// Caller-supplied operator table first, then catalog-provided functions.
this.opTab = new ChainedSqlOperatorTable(Arrays.asList(operatorTable, catalog));
// Null factory when default costing is requested — presumably the planner
// then uses its built-in cost model (confirm in planner setup).
this.costFactory = (settings.useDefaultCosting()) ? null : new DremioCost.Factory();
this.validator = new SqlValidatorImpl(flattenCounter, opTab, catalog, typeFactory, DremioSqlConformance.INSTANCE);
validator.setIdentifierExpansion(true);
// Materialization list must exist before substitutions are wired to it.
this.materializations = new MaterializationList(this, session, materializationProvider);
this.substitutions = AccelerationAwareSubstitutionProvider.of(factory.getSubstitutionProvider(materializations, this.settings.options));
this.planner = new DremioVolcanoPlanner(this);
this.cluster = RelOptCluster.create(planner, new DremioRexBuilder(typeFactory));
this.cluster.setMetadataProvider(DefaultRelMetadataProvider.INSTANCE);
this.registry = Preconditions.checkNotNull(registry, "registry cannot be null");
}
示例13: plan
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Plans a SQL query: registers each configured stream's schema under its
 * multi-part source path, registers UDFs, then parses, validates and converts
 * the query into a relational tree.
 *
 * @param query SQL text to plan
 * @return the relational-algebra root for the validated query
 * @throws SamzaException wrapping any planning failure
 */
public RelRoot plan(String query) {
try {
Connection connection = DriverManager.getConnection("jdbc:calcite:");
CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class);
SchemaPlus rootSchema = calciteConnection.getRootSchema();
// Build a nested schema hierarchy mirroring each stream's source parts,
// e.g. a.b.stream -> subschema a, subschema b, then the stream's table.
for (SqlSystemStreamConfig ssc : systemStreamConfigBySource.values()) {
SchemaPlus previousLevelSchema = rootSchema;
List<String> sourceParts = ssc.getSourceParts();
RelSchemaProvider relSchemaProvider = relSchemaProviders.get(ssc.getSource());
for (String sourcePart : sourceParts) {
if (!sourcePart.equalsIgnoreCase(ssc.getStreamName())) {
// Intermediate path component: reuse an existing sub-schema or create one.
SchemaPlus sourcePartSchema = rootSchema.getSubSchema(sourcePart);
if (sourcePartSchema == null) {
sourcePartSchema = previousLevelSchema.add(sourcePart, new AbstractSchema());
}
previousLevelSchema = sourcePartSchema;
} else {
// If the source part is the streamName, then fetch the schema corresponding to the stream and register.
RelDataType relationalSchema = relSchemaProvider.getRelationalSchema();
previousLevelSchema.add(ssc.getStreamName(), createTableFromRelSchema(relationalSchema));
break;
}
}
}
// Wrap each UDF's method as a Calcite scalar function.
List<SamzaSqlScalarFunctionImpl> samzaSqlFunctions = udfMetadata.stream()
.map(x -> new SamzaSqlScalarFunctionImpl(x.getName(), x.getUdfMethod()))
.collect(Collectors.toList());
final List<RelTraitDef> traitDefs = new ArrayList<>();
traitDefs.add(ConventionTraitDef.INSTANCE);
traitDefs.add(RelCollationTraitDef.INSTANCE);
// Samza's operator table first, then the UDF table.
List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
sqlOperatorTables.add(new SamzaSqlOperatorTable());
sqlOperatorTables.add(new SamzaSqlUdfOperatorTable(samzaSqlFunctions));
FrameworkConfig frameworkConfig = Frameworks.newConfigBuilder()
.parserConfig(SqlParser.configBuilder().setLex(Lex.JAVA).build())
.defaultSchema(rootSchema)
.operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables))
.traitDefs(traitDefs)
.context(Contexts.EMPTY_CONTEXT)
.costFactory(null)
.build();
Planner planner = Frameworks.getPlanner(frameworkConfig);
SqlNode sql = planner.parse(query);
SqlNode validatedSql = planner.validate(sql);
RelRoot relRoot = planner.rel(validatedSql);
LOG.info("query plan:\n" + sql.toString());
LOG.info("relational graph:");
printRelGraph(relRoot.project());
return relRoot;
} catch (Exception e) {
LOG.error("Query planner failed with exception.", e);
throw new SamzaException(e);
}
}
示例14: sqlOverNestedTable
import org.apache.calcite.sql.util.ChainedSqlOperatorTable; //导入依赖的package包/类
/**
 * Parses, validates and converts {@code sql} over two in-memory streamable
 * tables (FOO, BAR) whose columns exercise nested types: a nullable
 * VARCHAR-to-INTEGER map, a map of maps, and a nullable INTEGER array. A
 * scalar UDF is registered as MYPLUS. Prints the plan and returns the state.
 *
 * @param sql query text to plan
 * @return the schema and converted relational tree
 */
public static CalciteState sqlOverNestedTable(String sql)
    throws RelConversionException, ValidationException, SqlParseException {
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final JavaTypeFactory factory =
      new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

  // Every nested component is created nullable via createTypeWithNullability.
  final StreamableTable nestedTable = new CompilerUtil.TableBuilderInfo(factory)
      .field("ID", SqlTypeName.INTEGER)
      .field("MAPFIELD",
          // map<varchar, integer>, nullable at every level
          factory.createTypeWithNullability(
              factory.createMapType(
                  factory.createTypeWithNullability(
                      factory.createSqlType(SqlTypeName.VARCHAR), true),
                  factory.createTypeWithNullability(
                      factory.createSqlType(SqlTypeName.INTEGER), true)),
              true))
      .field("NESTEDMAPFIELD",
          // map<varchar, map<varchar, integer>>, nullable at every level
          factory.createTypeWithNullability(
              factory.createMapType(
                  factory.createTypeWithNullability(
                      factory.createSqlType(SqlTypeName.VARCHAR), true),
                  factory.createTypeWithNullability(
                      factory.createMapType(
                          factory.createTypeWithNullability(
                              factory.createSqlType(SqlTypeName.VARCHAR), true),
                          factory.createTypeWithNullability(
                              factory.createSqlType(SqlTypeName.INTEGER), true)),
                      true)),
              true))
      .field("ARRAYFIELD",
          // array<integer> with unlimited cardinality (-1)
          factory.createTypeWithNullability(
              factory.createArrayType(
                  factory.createTypeWithNullability(
                      factory.createSqlType(SqlTypeName.INTEGER), true), -1L),
              true))
      .build();
  final Table streamTable = nestedTable.stream();
  rootSchema.add("FOO", streamTable);
  rootSchema.add("BAR", streamTable);
  rootSchema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));

  // Standard operators plus schema-registered functions (e.g. MYPLUS).
  final List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(new CalciteCatalogReader(CalciteSchema.from(rootSchema),
      false,
      Collections.<String>emptyList(), factory));
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .defaultSchema(rootSchema)
      .operatorTable(new ChainedSqlOperatorTable(operatorTables))
      .build();

  final Planner planner = Frameworks.getPlanner(config);
  final SqlNode parsed = planner.parse(sql);
  final SqlNode validated = planner.validate(parsed);
  final RelNode relTree = planner.convert(validated);
  System.out.println(RelOptUtil.toString(relTree, SqlExplainLevel.ALL_ATTRIBUTES));
  return new CalciteState(rootSchema, relTree);
}