This article collects typical usage examples of the StreamTableEnvironment.sql method from the Java class org.apache.flink.table.api.java.StreamTableEnvironment. If you have been wondering what exactly StreamTableEnvironment.sql does in Java, how to call it, or what working examples of it look like, the curated samples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.table.api.java.StreamTableEnvironment.
The following presents 11 code examples of the StreamTableEnvironment.sql method, sorted by popularity by default.
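Before the examples, here is a minimal sketch of the pattern they all share: register a DataStream as a table, run a SQL query through StreamTableEnvironment.sql, and convert the result back into a DataStream. The table and field names ("Words", "word", "cnt") are illustrative assumptions, not taken from any example below; note that in later Flink releases this method was superseded by sqlQuery/sqlUpdate, so the sketch targets the older API used throughout this page.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

public class SqlMethodSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

        // Register a small in-memory stream as table "Words" (names are illustrative).
        DataStream<Tuple2<String, Integer>> input =
                env.fromElements(Tuple2.of("hello", 1), Tuple2.of("world", 2));
        tableEnv.registerDataStream("Words", input, "word, cnt");

        // Run a SQL query against the registered table.
        Table result = tableEnv.sql("SELECT word, cnt FROM Words WHERE cnt > 1");

        // Convert the result back to an append stream and print it.
        tableEnv.toAppendStream(result, Row.class).print();
        env.execute("StreamTableEnvironment.sql sketch");
    }
}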
Example 1: testSelect
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
@Test
public void testSelect() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    StreamITCase.clear();

    DataStream<Tuple3<Integer, Long, String>> ds = StreamTestData.getSmall3TupleDataSet(env);
    Table in = tableEnv.fromDataStream(ds, "a,b,c");
    tableEnv.registerTable("MyTable", in);

    String sqlQuery = "SELECT * FROM MyTable";
    Table result = tableEnv.sql(sqlQuery);

    DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
    resultSet.addSink(new StreamITCase.StringSink<Row>());
    env.execute();

    List<String> expected = new ArrayList<>();
    expected.add("1,1,Hi");
    expected.add("2,2,Hello");
    expected.add("3,2,Hello world");
    StreamITCase.compareWithList(expected);
}
Example 2: testFilter
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
@Test
public void testFilter() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    StreamITCase.clear();

    DataStream<Tuple5<Integer, Long, Integer, String, Long>> ds = StreamTestData.get5TupleDataStream(env);
    tableEnv.registerDataStream("MyTable", ds, "a, b, c, d, e");

    String sqlQuery = "SELECT a, b, e FROM MyTable WHERE c < 4";
    Table result = tableEnv.sql(sqlQuery);

    DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
    resultSet.addSink(new StreamITCase.StringSink<Row>());
    env.execute();

    List<String> expected = new ArrayList<>();
    expected.add("1,1,1");
    expected.add("2,2,2");
    expected.add("2,3,1");
    expected.add("3,4,2");
    StreamITCase.compareWithList(expected);
}
Example 3: tcFlinkAvroSQL
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void tcFlinkAvroSQL(String KafkaServerHostPort, String SchemaRegistryHostPort,
                                  String srcTopic, String targetTopic,
                                  String consumerGroupId, String sinkKeys, String sqlState) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), KafkaServerHostPort);
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, consumerGroupId);
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, sinkKeys);

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopicList[i], new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result = tableEnv.sql(sqlState);
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);
        // For the old producer, we need to create the topic-value subject as well
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic + "-value", result);

        // deliver properties to the sink
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        Kafka09AvroTableSink avro_sink =
                new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        env.execute("DF_FlinkSQL_Client_" + srcTopic + "-" + targetTopic);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 4: testFlinkAvroSQL
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkAvroSQL() {
    System.out.println("TestCase_Test Avro SQL");
    String resultFile = "/home/vagrant/test.txt";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", "empty_schema");

    try {
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);
        //Table result = tableEnv.sql("SELECT STREAM name, symbol, exchange FROM Orders");
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");

        Files.deleteIfExists(Paths.get(resultFile));
        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 5: testFlinkAvroSQLJson
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkAvroSQLJson() {
    System.out.println("TestCase_Test Avro SQL to Json Sink");
    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + " { \"name\":\"symbol\", \"type\":\"string\" },"
            + " { \"name\":\"name\", \"type\":\"string\" },"
            + " { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    DFRemoteStreamEnvironment env = new DFRemoteStreamEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("useAvro", "avro");
    properties.setProperty("static.avro.schema",
            SchemaRegistryClient.getSchemaFromRegistry("http://localhost:8081", "test-value", "latest").toString());

    try {
        HashMap<String, String> hm = new HashMap<>();
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");

        //Kafka09JsonTableSink json_sink = new Kafka09JsonTableSink("test_json", properties, new FlinkFixedPartitioner());
        Kafka09AvroTableSink json_sink = new Kafka09AvroTableSink("test_json", properties, new FlinkFixedPartitioner());
        // write the result Table to the TableSink
        result.writeToSink(json_sink);
        env.executeWithDFObj("Flink AVRO SQL KAFKA Test", new DFJobPOPJ().setJobConfig(hm));
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 6: testRowRegisterRowWithNames
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
@Test
public void testRowRegisterRowWithNames() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    StreamITCase.clear();

    List<Row> data = new ArrayList<>();
    data.add(Row.of(1, 1L, "Hi"));
    data.add(Row.of(2, 2L, "Hello"));
    data.add(Row.of(3, 2L, "Hello world"));

    TypeInformation<?>[] types = {
            BasicTypeInfo.INT_TYPE_INFO,
            BasicTypeInfo.LONG_TYPE_INFO,
            BasicTypeInfo.STRING_TYPE_INFO};
    String[] names = {"a", "b", "c"};
    RowTypeInfo typeInfo = new RowTypeInfo(types, names);

    DataStream<Row> ds = env.fromCollection(data).returns(typeInfo);
    Table in = tableEnv.fromDataStream(ds, "a,b,c");
    tableEnv.registerTable("MyTableRow", in);

    String sqlQuery = "SELECT a,c FROM MyTableRow";
    Table result = tableEnv.sql(sqlQuery);

    DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
    resultSet.addSink(new StreamITCase.StringSink<Row>());
    env.execute();

    List<String> expected = new ArrayList<>();
    expected.add("1,Hi");
    expected.add("2,Hello");
    expected.add("3,Hello world");
    StreamITCase.compareWithList(expected);
}
Example 7: testUnion
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
@Test
public void testUnion() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    StreamITCase.clear();

    DataStream<Tuple3<Integer, Long, String>> ds1 = StreamTestData.getSmall3TupleDataSet(env);
    Table t1 = tableEnv.fromDataStream(ds1, "a,b,c");
    tableEnv.registerTable("T1", t1);

    DataStream<Tuple5<Integer, Long, Integer, String, Long>> ds2 = StreamTestData.get5TupleDataStream(env);
    tableEnv.registerDataStream("T2", ds2, "a, b, d, c, e");

    String sqlQuery = "SELECT * FROM T1 " +
            "UNION ALL " +
            "(SELECT a, b, c FROM T2 WHERE a < 3)";
    Table result = tableEnv.sql(sqlQuery);

    DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
    resultSet.addSink(new StreamITCase.StringSink<Row>());
    env.execute();

    List<String> expected = new ArrayList<>();
    expected.add("1,1,Hi");
    expected.add("2,2,Hello");
    expected.add("3,2,Hello world");
    expected.add("1,1,Hallo");
    expected.add("2,2,Hallo Welt");
    expected.add("2,3,Hallo Welt wie");
    StreamITCase.compareWithList(expected);
}
Example 8: testEndToEnd
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
/**
 * Tests the end-to-end functionality of a table source & sink.
 *
 * <p>This test uses the {@link FlinkPravegaTableSink} to emit an in-memory table
 * containing sample data as a Pravega stream of 'append' events (i.e. as a changelog).
 * The test then uses the {@link FlinkPravegaTableSource} to absorb the changelog as a new table.
 *
 * <p>Flink's ability to convert POJOs (e.g. {@link SampleRecord}) to/from table rows is also
 * demonstrated (a standalone sketch of that conversion follows this example).
 *
 * <p>Because the source is unbounded, the test must throw an exception to deliberately terminate the job.
 *
 * @throws Exception on exception
 */
@Test
public void testEndToEnd() throws Exception {
    // create a Pravega stream for test purposes
    StreamId stream = new StreamId(setupUtils.getScope(), "FlinkTableITCase.testEndToEnd");
    this.setupUtils.createTestStream(stream.getName(), 1);

    // create a Flink Table environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment().setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    // define a table of sample data from a collection of POJOs. Schema:
    // root
    // |-- category: String
    // |-- value: Integer
    Table table = tableEnv.fromDataStream(env.fromCollection(SAMPLES));

    // write the table to a Pravega stream (using the 'category' column as a routing key)
    FlinkPravegaTableSink sink = new FlinkPravegaTableSink(
            this.setupUtils.getControllerUri(), stream, JsonRowSerializationSchema::new, "category");
    table.writeToSink(sink);

    // register the Pravega stream as a table called 'samples'
    FlinkPravegaTableSource source = new FlinkPravegaTableSource(
            this.setupUtils.getControllerUri(), stream, 0, JsonRowDeserializationSchema::new, SAMPLE_SCHEMA);
    tableEnv.registerTableSource("samples", source);

    // select some sample data from the Pravega-backed table, as a view
    Table view = tableEnv.sql("SELECT * FROM samples WHERE category IN ('A','B')");

    // write the view to a test sink that verifies the data for test purposes
    tableEnv.toAppendStream(view, SampleRecord.class).addSink(new TestSink(SAMPLES));

    // execute the topology
    try {
        env.execute();
        Assert.fail("expected an exception");
    } catch (JobExecutionException e) {
        // we expect the job to fail because the test sink throws a deliberate exception.
        Assert.assertTrue(e.getCause() instanceof TestCompletionException);
    }
}
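To isolate the POJO round-trip used above, here is a minimal, self-contained sketch. The SensorReading POJO, its field names, and the table name "Readings" are illustrative assumptions, not part of the test above: fromDataStream derives the table schema from the POJO's public fields, and toAppendStream maps result rows back onto the POJO by field name.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;

public class PojoTableSketch {
    // A Flink POJO: public no-arg constructor plus public fields (or getters/setters).
    public static class SensorReading {
        public String category;
        public int value;

        public SensorReading() {}

        public SensorReading(String category, int value) {
            this.category = category;
            this.value = value;
        }

        @Override
        public String toString() {
            return category + "=" + value;
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment().setParallelism(1);
        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

        DataStream<SensorReading> input = env.fromElements(
                new SensorReading("A", 1), new SensorReading("B", 2), new SensorReading("C", 3));

        // Schema is derived from the POJO fields:
        // root |-- category: String |-- value: Integer
        tableEnv.registerTable("Readings", tableEnv.fromDataStream(input));

        Table view = tableEnv.sql("SELECT * FROM Readings WHERE category IN ('A','B')");

        // Rows are mapped back onto the POJO by field name.
        tableEnv.toAppendStream(view, SensorReading.class).print();
        env.execute("pojo-table-sketch");
    }
}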
Example 9: tcFlinkAvroSQL
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void tcFlinkAvroSQL(String SchemaRegistryHostPort, String srcTopic, String targetTopic, String sqlState) {
    System.out.println("tcFlinkAvroSQL");
    String resultFile = "testResult";

    String jarPath = "C:/Users/dadu/Coding/df_data_service/target/df-data-service-1.1-SNAPSHOT-fat.jar";
    //String jarPath = "/Users/will/Documents/Coding/GitHub/df_data_service/target/df-data-service-1.1-SNAPSHOT-fat.jar";
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), "localhost:9092");
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, "consumer_test");
    //properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, "test");
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, "symbol");

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopicList[i], new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result = tableEnv.sql(sqlState);
        result.printSchema();
        System.out.println("generated avro schema is = " + SchemaRegistryClient.tableAPIToAvroSchema(result, targetTopic));
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);

        // delivered properties
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        System.out.println(Paths.get(resultFile).toAbsolutePath());
        Kafka09AvroTableSink avro_sink =
                new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        //result.writeToSink(new CsvTableSink(resultFile, "|", 1, FileSystem.WriteMode.OVERWRITE));
        env.execute("tcFlinkAvroSQL");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 10: testFlinkSQL
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkSQL() {
    LOG.info("Only Unit Testing Function is enabled");
    String resultFile = "/home/vagrant/test.txt";

    try {
        String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
                .setParallelism(1);
        String kafkaTopic = "finance";
        String kafkaTopic_stage = "df_trans_stage_finance";
        String kafkaTopic_out = "df_trans_out_finance";

        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        properties.setProperty("group.id", "consumer3");

        // Internally convert the JSON string to JSON - begin
        DataStream<String> stream = env
                .addSource(new FlinkKafkaConsumer09<>(kafkaTopic, new SimpleStringSchema(), properties));

        stream.map(new MapFunction<String, String>() {
            @Override
            public String map(String jsonString) throws Exception {
                return jsonString.replaceAll("\\\\", "").replace("\"{", "{").replace("}\"", "}");
            }
        }).addSink(new FlinkKafkaProducer09<String>("localhost:9092", kafkaTopic_stage, new SimpleStringSchema()));
        // Internally convert the JSON string to JSON - end

        String[] fieldNames = new String[] {"name"};
        Class<?>[] fieldTypes = new Class<?>[] {String.class};

        Kafka09AvroTableSource kafkaTableSource = new Kafka09AvroTableSource(
                kafkaTopic_stage,
                properties,
                fieldNames,
                fieldTypes);
        //kafkaTableSource.setFailOnMissingField(true);

        tableEnv.registerTableSource("Orders", kafkaTableSource);
        //Table result = tableEnv.sql("SELECT STREAM name FROM Orders");
        Table result = tableEnv.sql("SELECT name FROM Orders");

        Files.deleteIfExists(Paths.get(resultFile));
        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("FlinkConsumer");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 11: testFlinkAvroSQLWithStaticSchema
import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkAvroSQLWithStaticSchema() {
    System.out.println("TestCase_Test Avro SQL with static Schema");
    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + " { \"name\":\"symbol\", \"type\":\"string\" },"
            + " { \"name\":\"name\", \"type\":\"string\" },"
            + " { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";
    String resultFile = "/home/vagrant/test.txt";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", STATIC_USER_SCHEMA);

    try {
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);
        //Table result = tableEnv.sql("SELECT STREAM name, symbol, exchange FROM Orders");
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");

        Files.deleteIfExists(Paths.get(resultFile));
        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}