Java StreamTableEnvironment Class Code Examples

This article compiles typical usage examples of the Java class org.apache.flink.table.api.java.StreamTableEnvironment, collected from open-source projects. If you are wondering what StreamTableEnvironment is for, or how to use it in practice, the curated class examples below should help.


StreamTableEnvironment belongs to the org.apache.flink.table.api.java package. The sections below present 15 code examples of the class, ordered by popularity.
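
Before the individual examples, here is a minimal, self-contained sketch of the workflow they all share: obtain a StreamTableEnvironment from a StreamExecutionEnvironment, register a DataStream as a table, run a SQL query, and convert the result back to a DataStream. It is only a sketch, targeting the legacy pre-1.9 Table API used throughout this page (newer Flink versions replace TableEnvironment.getTableEnvironment(env) with StreamTableEnvironment.create(env), and some older examples below use sql() where this sketch uses sqlQuery()); the table name "Greetings" and the field aliases are illustrative, not part of any project quoted here.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

public class StreamTableEnvironmentSketch {
  public static void main(String[] args) throws Exception {
    // Obtain the streaming environment and wrap it in a table environment.
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    // Register an in-memory DataStream as a table with named fields
    // (names and sample data are illustrative).
    DataStream<Tuple2<Integer, String>> ds =
        env.fromElements(Tuple2.of(1, "Hi"), Tuple2.of(2, "Hello"));
    tableEnv.registerDataStream("Greetings", ds, "id, msg");

    // Run a SQL query against the registered table.
    Table result = tableEnv.sqlQuery("SELECT id, msg FROM Greetings WHERE id > 1");

    // Convert the result back to an append-only DataStream and print it.
    tableEnv.toAppendStream(result, Row.class).print();

    env.execute("StreamTableEnvironment sketch");
  }
}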

Example 1: main — compile a JobDescriptor read from stdin into a Flink JobGraph and write the serialized compilation result

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
public static void main(String[] args) throws IOException {
  StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.createLocalEnvironment();
  StreamTableEnvironment env = StreamTableEnvironment.getTableEnvironment(execEnv);
  execEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
  CompilationResult res = new CompilationResult();

  try {
    JobDescriptor job = getJobConf(System.in);
    res.jobGraph(new JobCompiler(env, job).getJobGraph());
  } catch (Throwable e) {
    res.remoteThrowable(e);
  }

  try (OutputStream out = chooseOutputStream(args)) {
    out.write(res.serialize());
  }
}
 
Developer: uber, Project: AthenaX, Lines: 18, Source: JobCompiler.java

Example 2: testSelect — register a 3-tuple DataStream as a table and verify a SELECT * query

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testSelect() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple3<Integer, Long, String>> ds = StreamTestData.getSmall3TupleDataSet(env);
	Table in = tableEnv.fromDataStream(ds, "a,b,c");
	tableEnv.registerTable("MyTable", in);

	String sqlQuery = "SELECT * FROM MyTable";
	Table result = tableEnv.sql(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,Hi");
	expected.add("2,2,Hello");
	expected.add("3,2,Hello world");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: SqlITCase.java

Example 3: testFilter — register a 5-tuple DataStream and verify a filtered projection (WHERE c < 4)

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testFilter() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple5<Integer, Long, Integer, String, Long>> ds = StreamTestData.get5TupleDataStream(env);
	tableEnv.registerDataStream("MyTable", ds, "a, b, c, d, e");

	String sqlQuery = "SELECT a, b, e FROM MyTable WHERE c < 4";
	Table result = tableEnv.sql(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,1");
	expected.add("2,2,2");
	expected.add("2,3,1");
	expected.add("3,4,2");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: SqlITCase.java

Example 4: testSelect — the same SELECT * test, written against the newer sqlQuery() API

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testSelect() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple3<Integer, Long, String>> ds = JavaStreamTestData.getSmall3TupleDataSet(env);
	Table in = tableEnv.fromDataStream(ds, "a,b,c");
	tableEnv.registerTable("MyTable", in);

	String sqlQuery = "SELECT * FROM MyTable";
	Table result = tableEnv.sqlQuery(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,Hi");
	expected.add("2,2,Hello");
	expected.add("3,2,Hello world");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: JavaSqlITCase.java

Example 5: testFilter — the same filter test, written against the newer sqlQuery() API

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testFilter() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple5<Integer, Long, Integer, String, Long>> ds = JavaStreamTestData.get5TupleDataStream(env);
	tableEnv.registerDataStream("MyTable", ds, "a, b, c, d, e");

	String sqlQuery = "SELECT a, b, e FROM MyTable WHERE c < 4";
	Table result = tableEnv.sqlQuery(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,1");
	expected.add("2,2,2");
	expected.add("2,3,1");
	expected.add("3,4,2");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: JavaSqlITCase.java

Example 6: main — read from one Kafka topic and write back to another, with checkpointing and a fixed-delay restart strategy

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
public static void main(String[] args) throws Exception {
    // Read parameters from command line
    final ParameterTool params = ParameterTool.fromArgs(args);

    if(params.getNumberOfParameters() < 4) {
        System.out.println("\nUsage: FlinkReadKafka --read-topic <topic> --write-topic <topic> --bootstrap.servers <kafka brokers> --group.id <groupid>");
        return;
    }


    // setup streaming environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
    env.enableCheckpointing(300000); // 300 seconds
    env.getConfig().setGlobalJobParameters(params);

    DataStream<String> messageStream = env
            .addSource(new FlinkKafkaConsumer010<>(
                    params.getRequired("read-topic"),
                    new SimpleStringSchema(),
                    params.getProperties())).name("Read from Kafka");

    // setup table environment
    StreamTableEnvironment sTableEnv = TableEnvironment.getTableEnvironment(env);


    // Write JSON payload back to Kafka topic
    messageStream.addSink(new FlinkKafkaProducer010<>(
                params.getRequired("write-topic"),
                new SimpleStringSchema(),
                params.getProperties())).name("Write To Kafka");

    env.execute("FlinkReadWriteKafka");
}
 
Developer: kgorman, Project: TrafficAnalyzer, Lines: 35, Source: FlinkReadWriteKafka.java
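
Note that this example obtains a StreamTableEnvironment (sTableEnv) but never registers the stream with it; the JSON payload is relayed from the read topic to the write topic unchanged. To actually run SQL over the stream, you would first register messageStream as a table, as the other examples on this page do.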

Example 7: tcFlinkAvroSQL — register a Kafka Avro table source per source topic, run a SQL statement, and write the result to a Kafka Avro sink via the schema registry

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
public static void tcFlinkAvroSQL(String KafkaServerHostPort, String SchemaRegistryHostPort,
                                  String srcTopic, String targetTopic,
                                  String consumerGroupId, String sinkKeys, String sqlState) {

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), KafkaServerHostPort);
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, consumerGroupId);
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, sinkKeys);

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopicList[i], new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result = tableEnv.sql(sqlState);
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);
        // For old producer, we need to create topic-value subject as well
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic + "-value", result);

        // delivered properties for sink
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        Kafka09AvroTableSink avro_sink =
                new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        env.execute("DF_FlinkSQL_Client_" + srcTopic + "-" + targetTopic);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 41, Source: FlinkAvroSQLClient.java

Example 8: testFlinkAvroSQL — run SQL over a Kafka Avro source on a remote environment and write the result to a CSV sink

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
public static void testFlinkAvroSQL() {
    System.out.println("TestCase_Test Avro SQL");
    String resultFile = "/home/vagrant/test.txt";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", "empty_schema");

    try {
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        //Table result = tableEnv.sql("SELECT STREAM name, symbol, exchange FROM Orders");
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");

        Files.deleteIfExists(Paths.get(resultFile));

        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 34, Source: UnitTestSuiteFlink.java

Example 9: testFlinkAvroSQLJson — run SQL over a Kafka Avro source (schema fetched from the registry) and write the result back to Kafka

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
public static void testFlinkAvroSQLJson() {
    System.out.println("TestCase_Test Avro SQL to Json Sink");
    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + "  { \"name\":\"symbol\", \"type\":\"string\" },"
            + "  { \"name\":\"name\", \"type\":\"string\" },"
            + "  { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    DFRemoteStreamEnvironment env = new DFRemoteStreamEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("useAvro", "avro");
    properties.setProperty("static.avro.schema",
            SchemaRegistryClient.getSchemaFromRegistry("http://localhost:8081", "test-value", "latest").toString());

    try {
        HashMap<String, String> hm = new HashMap<>();
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");
        //Kafka09JsonTableSink json_sink = new Kafka09JsonTableSink ("test_json", properties, new FlinkFixedPartitioner());
        Kafka09AvroTableSink json_sink = new Kafka09AvroTableSink("test_json", properties, new FlinkFixedPartitioner());

        // write the result Table to the TableSink
        result.writeToSink(json_sink);
        env.executeWithDFObj("Flink AVRO SQL KAFKA Test", new DFJobPOPJ().setJobConfig(hm) );
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 41, Source: UnitTestSuiteFlink.java

Example 10: main — scan a CSV table source, select one column, and write it to a CSV sink

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
public static void main(String args[]) {

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

		// Create a DataStream from a list of elements
		//DataStream<Integer> ds = env.fromElements(1, 2, 3, 4, 5);

		CsvTableSource csvTableSource = new CsvTableSource(
				"/Users/will/Downloads/file.csv",
				new String[] { "name", "id", "score", "comments" },
				new TypeInformation<?>[] {
						Types.STRING(),
						Types.STRING(),
						Types.STRING(),
						Types.STRING()
				}); // lenient

		tableEnv.registerTableSource("mycsv", csvTableSource);



		TableSink sink = new CsvTableSink("/Users/will/Downloads/out.csv", "|");


		//tableEnv.registerDataStream("tbl", ds, "a");
		//Table ingest = tableEnv.fromDataStream(ds, "name");
		Table in = tableEnv.scan("mycsv");
		//Table in = tableEnv.ingest("tbl");
		//Table in = tableEnv.fromDataStream(ds, "a");

		Table result = in.select("name");
		result.writeToSink(sink);
		try {
			env.execute();
		} catch (Exception e) {
			e.printStackTrace(); // surface failures instead of silently swallowing them
		}

		System.out.print("DONE");
	}
 
Developer: datafibers-community, Project: df_data_service, Lines: 42, Source: WordCountStream.java

Example 11: testRowRegisterRowWithNames — build a DataStream<Row> with explicit RowTypeInfo and query it by field name

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testRowRegisterRowWithNames() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	List<Row> data = new ArrayList<>();
	data.add(Row.of(1, 1L, "Hi"));
	data.add(Row.of(2, 2L, "Hello"));
	data.add(Row.of(3, 2L, "Hello world"));

	TypeInformation<?>[] types = {
			BasicTypeInfo.INT_TYPE_INFO,
			BasicTypeInfo.LONG_TYPE_INFO,
			BasicTypeInfo.STRING_TYPE_INFO};
	String[] names = {"a", "b", "c"};

	RowTypeInfo typeInfo = new RowTypeInfo(types, names);

	DataStream<Row> ds = env.fromCollection(data).returns(typeInfo);

	Table in = tableEnv.fromDataStream(ds, "a,b,c");
	tableEnv.registerTable("MyTableRow", in);

	String sqlQuery = "SELECT a,c FROM MyTableRow";
	Table result = tableEnv.sql(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,Hi");
	expected.add("2,Hello");
	expected.add("3,Hello world");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 39, Source: SqlITCase.java

Example 12: testUnion — UNION ALL over two registered streams

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testUnion() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple3<Integer, Long, String>> ds1 = StreamTestData.getSmall3TupleDataSet(env);
	Table t1 = tableEnv.fromDataStream(ds1, "a,b,c");
	tableEnv.registerTable("T1", t1);

	DataStream<Tuple5<Integer, Long, Integer, String, Long>> ds2 = StreamTestData.get5TupleDataStream(env);
	tableEnv.registerDataStream("T2", ds2, "a, b, d, c, e");

	String sqlQuery = "SELECT * FROM T1 " +
						"UNION ALL " +
						"(SELECT a, b, c FROM T2 WHERE a	< 3)";
	Table result = tableEnv.sql(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,Hi");
	expected.add("2,2,Hello");
	expected.add("3,2,Hello world");
	expected.add("1,1,Hallo");
	expected.add("2,2,Hallo Welt");
	expected.add("2,3,Hallo Welt wie");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 33, Source: SqlITCase.java

Example 13: testRowRegisterRowWithNames — the same Row test, written against the newer sqlQuery() API

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testRowRegisterRowWithNames() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	List<Row> data = new ArrayList<>();
	data.add(Row.of(1, 1L, "Hi"));
	data.add(Row.of(2, 2L, "Hello"));
	data.add(Row.of(3, 2L, "Hello world"));

	TypeInformation<?>[] types = {
			BasicTypeInfo.INT_TYPE_INFO,
			BasicTypeInfo.LONG_TYPE_INFO,
			BasicTypeInfo.STRING_TYPE_INFO};
	String[] names = {"a", "b", "c"};

	RowTypeInfo typeInfo = new RowTypeInfo(types, names);

	DataStream<Row> ds = env.fromCollection(data).returns(typeInfo);

	Table in = tableEnv.fromDataStream(ds, "a,b,c");
	tableEnv.registerTable("MyTableRow", in);

	String sqlQuery = "SELECT a,c FROM MyTableRow";
	Table result = tableEnv.sqlQuery(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,Hi");
	expected.add("2,Hello");
	expected.add("3,Hello world");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 39, Source: JavaSqlITCase.java

Example 14: testUnion — the same union test, written against the newer sqlQuery() API

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
@Test
public void testUnion() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple3<Integer, Long, String>> ds1 = JavaStreamTestData.getSmall3TupleDataSet(env);
	Table t1 = tableEnv.fromDataStream(ds1, "a,b,c");
	tableEnv.registerTable("T1", t1);

	DataStream<Tuple5<Integer, Long, Integer, String, Long>> ds2 = JavaStreamTestData.get5TupleDataStream(env);
	tableEnv.registerDataStream("T2", ds2, "a, b, d, c, e");

	String sqlQuery = "SELECT * FROM T1 " +
						"UNION ALL " +
						"(SELECT a, b, c FROM T2 WHERE a	< 3)";
	Table result = tableEnv.sqlQuery(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,Hi");
	expected.add("2,2,Hello");
	expected.add("3,2,Hello world");
	expected.add("1,1,Hallo");
	expected.add("2,2,Hallo Welt");
	expected.add("2,3,Hallo Welt wie");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 33, Source: JavaSqlITCase.java

Example 15: JobCompiler — constructor that captures the StreamTableEnvironment and the job descriptor

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the required package/class
JobCompiler(StreamTableEnvironment env, JobDescriptor job) {
  this.job = job;
  this.env = env;
}
 
Developer: uber, Project: AthenaX, Lines: 5, Source: JobCompiler.java


Note: The org.apache.flink.table.api.java.StreamTableEnvironment examples above were collected from open-source projects hosted on GitHub and similar platforms. Copyright of the code remains with the original authors; consult each project's license before redistributing or reusing the snippets.