

Java StreamTableEnvironment.registerTableSource Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.table.api.java.StreamTableEnvironment.registerTableSource. If you are unsure how StreamTableEnvironment.registerTableSource is used in practice, or what real-world calls to it look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.table.api.java.StreamTableEnvironment.


The sections below present 13 code examples of the StreamTableEnvironment.registerTableSource method, sorted by popularity by default.
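
Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all follow: create a StreamTableEnvironment, register a TableSource under a name, then query that name. It assumes the pre-1.9 Flink Table API used throughout this page (TableEnvironment.getTableEnvironment, tableEnv.sql, Table.writeToSink); the CSV paths and field names are hypothetical placeholders, not taken from any of the projects below.

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.Types;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.sinks.CsvTableSink;
import org.apache.flink.table.sources.CsvTableSource;

public class RegisterTableSourceSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

        // Hypothetical CSV source; any TableSource implementation is registered the same way.
        CsvTableSource csvSource = new CsvTableSource(
                "/tmp/people.csv",
                new String[] { "name", "id" },
                new TypeInformation<?>[] { Types.STRING(), Types.STRING() });

        // registerTableSource makes the source queryable under the given table name.
        tableEnv.registerTableSource("people", csvSource);

        // The registered name can now be referenced from SQL (or via tableEnv.scan).
        Table result = tableEnv.sql("SELECT name FROM people");
        result.writeToSink(new CsvTableSink("/tmp/people_out.csv", "|"));

        env.execute("registerTableSource sketch");
    }
}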

Example 1: tcFlinkAvroSQL

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void tcFlinkAvroSQL(String KafkaServerHostPort, String SchemaRegistryHostPort,
                                  String srcTopic, String targetTopic,
                                  String consumerGroupId, String sinkKeys, String sqlState) {

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), KafkaServerHostPort);
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, consumerGroupId);
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, sinkKeys);

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopicList[i], new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result = tableEnv.sql(sqlState);
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);
        // For old producer, we need to create topic-value subject as well
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic + "-value", result);

        // delivered properties for sink
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        Kafka09AvroTableSink avro_sink =
                new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        env.execute("DF_FlinkSQL_Client_" + srcTopic + "-" + targetTopic);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 41, Source file: FlinkAvroSQLClient.java

Example 2: testFlinkAvroSQL

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkAvroSQL() {
    System.out.println("TestCase_Test Avro SQL");
    String resultFile = "/home/vagrant/test.txt";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", "empty_schema");

    try {
        Kafka09AvroTableSource kafkaAvroTableSource =  new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        //Table result = tableEnv.sql("SELECT STREAM name, symbol, exchange FROM Orders");
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");

        Files.deleteIfExists(Paths.get(resultFile));

        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 34, Source file: UnitTestSuiteFlink.java

Example 3: testFlinkAvroSQLJson

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkAvroSQLJson() {
    System.out.println("TestCase_Test Avro SQL to Json Sink");
    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + "  { \"name\":\"symbol\", \"type\":\"string\" },"
            + "  { \"name\":\"name\", \"type\":\"string\" },"
            + "  { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    DFRemoteStreamEnvironment env = new DFRemoteStreamEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("useAvro", "avro");
    properties.setProperty("static.avro.schema",
            SchemaRegistryClient.getSchemaFromRegistry("http://localhost:8081", "test-value", "latest").toString());

    try {
        HashMap<String, String> hm = new HashMap<>();
        Kafka09AvroTableSource kafkaAvroTableSource =  new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");
        //Kafka09JsonTableSink json_sink = new Kafka09JsonTableSink ("test_json", properties, new FlinkFixedPartitioner());
        Kafka09AvroTableSink json_sink = new Kafka09AvroTableSink ("test_json", properties, new FlinkFixedPartitioner());

        // write the result Table to the TableSink
        result.writeToSink(json_sink);
        env.executeWithDFObj("Flink AVRO SQL KAFKA Test", new DFJobPOPJ().setJobConfig(hm) );
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 41, Source file: UnitTestSuiteFlink.java

Example 4: main

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void main(String args[]) {

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

		// Create a DataStream from a list of elements
		//DataStream<Integer> ds = env.fromElements(1, 2, 3, 4, 5);

		CsvTableSource csvTableSource = new CsvTableSource(
				"/Users/will/Downloads/file.csv",
				new String[] { "name", "id", "score", "comments" },
				new TypeInformation<?>[] {
						Types.STRING(),
						Types.STRING(),
						Types.STRING(),
						Types.STRING()
				}); // lenient

		tableEnv.registerTableSource("mycsv", csvTableSource);



		TableSink sink = new CsvTableSink("/Users/will/Downloads/out.csv", "|");


		//tableEnv.registerDataStream("tbl", ds, "a");
		//Table ingest = tableEnv.fromDataStream(ds, "name");
		Table in = tableEnv.scan("mycsv");
		//Table in = tableEnv.ingest("tbl");
		//Table in = tableEnv.fromDataStream(ds, "a");

		Table result = in.select("name");
		result.writeToSink(sink);
		try {
			env.execute();
		} catch (Exception e) {

		}

		System.out.print("DONE");
	}
 
Developer: datafibers-community, Project: df_data_service, Lines: 42, Source file: WordCountStream.java

Example 5: testEndToEnd

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
/**
 * Tests the end-to-end functionality of table source & sink.
 *
 * <p>This test uses the {@link FlinkPravegaTableSink} to emit an in-memory table
 * containing sample data as a Pravega stream of 'append' events (i.e. as a changelog).
 * The test then uses the {@link FlinkPravegaTableSource} to absorb the changelog as a new table.
 *
 * <p>Flink's ability to convert POJOs (e.g. {@link SampleRecord}) to/from table rows is also demonstrated.
 *
 * <p>Because the source is unbounded, the test must throw an exception to deliberately terminate the job.
 *
 * @throws Exception on exception
 */
@Test
public void testEndToEnd() throws Exception {

    // create a Pravega stream for test purposes
    StreamId stream = new StreamId(setupUtils.getScope(), "FlinkTableITCase.testEndToEnd");
    this.setupUtils.createTestStream(stream.getName(), 1);

    // create a Flink Table environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment().setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    // define a table of sample data from a collection of POJOs.  Schema:
    // root
    //  |-- category: String
    //  |-- value: Integer
    Table table = tableEnv.fromDataStream(env.fromCollection(SAMPLES));

    // write the table to a Pravega stream (using the 'category' column as a routing key)
    FlinkPravegaTableSink sink = new FlinkPravegaTableSink(
            this.setupUtils.getControllerUri(), stream, JsonRowSerializationSchema::new, "category");
    table.writeToSink(sink);

    // register the Pravega stream as a table called 'samples'
    FlinkPravegaTableSource source = new FlinkPravegaTableSource(
            this.setupUtils.getControllerUri(), stream, 0, JsonRowDeserializationSchema::new, SAMPLE_SCHEMA);
    tableEnv.registerTableSource("samples", source);

    // select some sample data from the Pravega-backed table, as a view
    Table view = tableEnv.sql("SELECT * FROM samples WHERE category IN ('A','B')");

    // write the view to a test sink that verifies the data for test purposes
    tableEnv.toAppendStream(view, SampleRecord.class).addSink(new TestSink(SAMPLES));

    // execute the topology
    try {
        env.execute();
        Assert.fail("expected an exception");
    } catch (JobExecutionException e) {
        // we expect the job to fail because the test sink throws a deliberate exception.
        Assert.assertTrue(e.getCause() instanceof TestCompletionException);
    }
}
 
Developer: pravega, Project: flink-connectors, Lines: 56, Source file: FlinkTableITCase.java

Example 6: tcFlinkAvroTableAPI

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void tcFlinkAvroTableAPI(String KafkaServerHostPort, String SchemaRegistryHostPort,
                                  String srcTopic, String targetTopic,
                                  String consumerGroupId, String sinkKeys, String transScript) {

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), KafkaServerHostPort);
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, consumerGroupId);
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, sinkKeys);

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopic, new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result;
        Table ingest = tableEnv.scan(srcTopic);
        String className = "dynamic.FlinkScript";
        String header = "package dynamic;\n" +
                "import org.apache.flink.table.api.Table;\n" +
                "import com.datafibers.util.*;\n";
        String javaCode = header +
                "public class FlinkScript implements DynamicRunner {\n" +
                "@Override \n" +
                "    public Table transTableObj(Table tbl) {\n" +
                "try {" +
                "return tbl." + transScript + ";\n" +
                "} catch (Exception e) {" +
                "};" +
                "return null;}}";
        // Dynamic code generation
        Class aClass = CompilerUtils.CACHED_COMPILER.loadFromJava(className, javaCode);
        DynamicRunner runner = (DynamicRunner) aClass.newInstance();
        result = runner.transTableObj(ingest);

        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);
        // delivered properties for sink
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        Kafka09AvroTableSink avro_sink =
                new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        env.execute("DF_FlinkTableAPI_Client_" + srcTopic + "-" + targetTopic);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 57, Source file: FlinkAvroTableAPIClient.java

Example 7: tcFlinkAvroSQL

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void tcFlinkAvroSQL(String SchemaRegistryHostPort, String srcTopic, String targetTopic, String sqlState) {
    System.out.println("tcFlinkAvroSQL");
    String resultFile = "testResult";

    String jarPath = "C:/Users/dadu/Coding/df_data_service/target/df-data-service-1.1-SNAPSHOT-fat.jar";
    //String jarPath = "/Users/will/Documents/Coding/GitHub/df_data_service/target/df-data-service-1.1-SNAPSHOT-fat.jar";
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), "localhost:9092");
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, "consumer_test");
    //properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, "test");
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, "symbol");

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopicList[i], new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result = tableEnv.sql(sqlState);
        result.printSchema();
        System.out.println("generated avro schema is = " + SchemaRegistryClient.tableAPIToAvroSchema(result, targetTopic));
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);

        // delivered properties
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT, SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT, SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        System.out.println(Paths.get(resultFile).toAbsolutePath());
        Kafka09AvroTableSink avro_sink =
                new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        //result.writeToSink(new CsvTableSink(resultFile, "|", 1, FileSystem.WriteMode.OVERWRITE));
        env.execute("tcFlinkAvroSQL");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 47, Source file: TCFlinkAvroSQL.java

Example 8: main

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void main(String args[]) {

		String transform = "flatMap(new FlinkUDF.LineSplitter()).groupBy(0).sum(1).print();\n";

		String transform2 = "select(\"name\");\n";

		String header = "package dynamic;\n" +
				"import org.apache.flink.api.table.Table;\n" +
				"import com.datafibers.util.*;\n";

		String javaCode = header +
				"public class FlinkScript implements DynamicRunner {\n" +
				"@Override \n" +
				"    public void runTransform(DataSet<String> ds) {\n" +
						"try {" +
						"ds."+ transform +
						"} catch (Exception e) {" +
						"};" +
				"}}";

		String javaCode2 = header +
				"public class FlinkScript implements DynamicRunner {\n" +
				"@Override \n" +
				"    public Table transTableObj(Table tbl) {\n" +
					"try {" +
					"return tbl."+ transform2 +
					"} catch (Exception e) {" +
					"};" +
					"return null;}}";

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
		CsvTableSource csvTableSource = new CsvTableSource(
				"/Users/will/Downloads/file.csv",
				new String[] { "name", "id", "score", "comments" },
				new TypeInformation<?>[] {
						Types.STRING(),
						Types.STRING(),
						Types.STRING(),
						Types.STRING()
				}); // lenient

		tableEnv.registerTableSource("mycsv", csvTableSource);
		TableSink sink = new CsvTableSink("/Users/will/Downloads/out.csv", "|");
		Table ingest = tableEnv.scan("mycsv");

		try {
			String className = "dynamic.FlinkScript";
			Class aClass = CompilerUtils.CACHED_COMPILER.loadFromJava(className, javaCode2);
			DynamicRunner runner = (DynamicRunner) aClass.newInstance();
			//runner.runTransform(ds);
			Table result = runner.transTableObj(ingest);
			// write the result Table to the TableSink
			result.writeToSink(sink);
			env.execute();

		} catch (Exception e) {
			e.printStackTrace();
		}
	}
 
Developer: datafibers-community, Project: df_data_service, Lines: 61, Source file: CodeGenFlinkTable.java

Example 9: testFlinkSQL

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkSQL() {

        LOG.info("Only Unit Testing Function is enabled");
        String resultFile = "/home/vagrant/test.txt";

        try {

            String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
            StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
                    .setParallelism(1);
            String kafkaTopic = "finance";
            String kafkaTopic_stage = "df_trans_stage_finance";
            String kafkaTopic_out = "df_trans_out_finance";



            StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
            Properties properties = new Properties();
            properties.setProperty("bootstrap.servers", "localhost:9092");
            properties.setProperty("group.id", "consumer3");

            // Internal convert Json String to Json - Begin
            DataStream<String> stream = env
                    .addSource(new FlinkKafkaConsumer09<>(kafkaTopic, new SimpleStringSchema(), properties));

            stream.map(new MapFunction<String, String>() {
                @Override
                public String map(String jsonString) throws Exception {
                    return jsonString.replaceAll("\\\\", "").replace("\"{", "{").replace("}\"","}");
                }
            }).addSink(new FlinkKafkaProducer09<String>("localhost:9092", kafkaTopic_stage, new SimpleStringSchema()));
            // Internal convert Json String to Json - End

            String[] fieldNames =  new String[] {"name"};
            Class<?>[] fieldTypes = new Class<?>[] {String.class};

            Kafka09AvroTableSource kafkaTableSource = new Kafka09AvroTableSource(
                    kafkaTopic_stage,
                    properties,
                    fieldNames,
                    fieldTypes);

            //kafkaTableSource.setFailOnMissingField(true);

            tableEnv.registerTableSource("Orders", kafkaTableSource);

            //Table result = tableEnv.sql("SELECT STREAM name FROM Orders");
            Table result = tableEnv.sql("SELECT name FROM Orders");

            Files.deleteIfExists(Paths.get(resultFile));

            // create a TableSink
            TableSink sink = new CsvTableSink(resultFile, "|");
            // write the result Table to the TableSink
            result.writeToSink(sink);

            env.execute("FlinkConsumer");

        } catch (Exception e) {
            e.printStackTrace();
        }
    }
 
Developer: datafibers-community, Project: df_data_service, Lines: 63, Source file: UnitTestSuiteFlink.java

Example 10: testFlinkAvroSQLWithStaticSchema

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkAvroSQLWithStaticSchema() {
    System.out.println("TestCase_Test Avro SQL with static Schema");

    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + "  { \"name\":\"symbol\", \"type\":\"string\" },"
            + "  { \"name\":\"name\", \"type\":\"string\" },"
            + "  { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";
    String resultFile = "/home/vagrant/test.txt";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", STATIC_USER_SCHEMA);

    try {
        Kafka09AvroTableSource kafkaAvroTableSource =  new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        //Table result = tableEnv.sql("SELECT STREAM name, symbol, exchange FROM Orders");
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");

        Files.deleteIfExists(Paths.get(resultFile));

        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 43, Source file: UnitTestSuiteFlink.java

Example 11: testFlinkAvroScriptWithStaticSchema

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void testFlinkAvroScriptWithStaticSchema() {
    System.out.println("TestCase_Test Avro Table API Script with static Schema");

    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + "  { \"name\":\"symbol\", \"type\":\"string\" },"
            + "  { \"name\":\"name\", \"type\":\"string\" },"
            + "  { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", STATIC_USER_SCHEMA);

    try {
        Kafka09AvroTableSource kafkaAvroTableSource =  new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        Table ingest = tableEnv.scan("Orders");

        String className = "dynamic.FlinkScript";

        String header = "package dynamic;\n" +
                "import org.apache.flink.table.api.Table;\n" +
                "import com.datafibers.util.*;\n";

        String transScript = "select(\"name\")";

        String javaCode = header +
                "public class FlinkScript implements DynamicRunner {\n" +
                "@Override \n" +
                "    public Table transTableObj(Table tbl) {\n" +
                "try {" +
                "return tbl."+ transScript + ";" +
                "} catch (Exception e) {" +
                "};" +
                "return null;}}";

        // Dynamic code generation
        Class aClass = CompilerUtils.CACHED_COMPILER.loadFromJava(className, javaCode);
        DynamicRunner runner = (DynamicRunner) aClass.newInstance();
        Table result = runner.transTableObj(ingest);

        Kafka09AvroTableSink sink =
                new Kafka09AvroTableSink ("test_json", properties, new FlinkFixedPartitioner());
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: datafibers-community, Project: df_data_service, Lines: 62, Source file: UnitTestSuiteFlink.java

Example 12: main

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

		// read parameters
		ParameterTool params = ParameterTool.fromArgs(args);
		String input = params.getRequired("input");

		final int maxEventDelay = 60;       // events are out of order by max 60 seconds
		final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

		// set up streaming execution environment
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		// create a TableEnvironment
		StreamTableEnvironment tEnv = TableEnvironment.getTableEnvironment(env);

		// register TaxiRideTableSource as table "TaxiRides"
		tEnv.registerTableSource(
				"TaxiRides",
				new TaxiRideTableSource(
						input,
						maxEventDelay,
						servingSpeedFactor));

		// register user-defined functions
		tEnv.registerFunction("isInNYC", new GeoUtils.IsInNYC());
		tEnv.registerFunction("toCellId", new GeoUtils.ToCellId());
		tEnv.registerFunction("toCoords", new GeoUtils.ToCoords());

		Table results = tEnv.sqlQuery(
			"SELECT " +
				"toCoords(cell), wstart, wend, isStart, popCnt " +
			"FROM " +
				"(SELECT " +
					"cell, " +
					"isStart, " +
					"HOP_START(eventTime, INTERVAL '5' MINUTE, INTERVAL '15' MINUTE) AS wstart, " +
					"HOP_END(eventTime, INTERVAL '5' MINUTE, INTERVAL '15' MINUTE) AS wend, " +
					"COUNT(isStart) AS popCnt " +
				"FROM " +
					"(SELECT " +
						"eventTime, " +
						"isStart, " +
						"CASE WHEN isStart THEN toCellId(startLon, startLat) ELSE toCellId(endLon, endLat) END AS cell " +
					"FROM TaxiRides " +
					"WHERE isInNYC(startLon, startLat) AND isInNYC(endLon, endLat)) " +
				"GROUP BY cell, isStart, HOP(eventTime, INTERVAL '5' MINUTE, INTERVAL '15' MINUTE)) " +
			"WHERE popCnt > 20"
			);

		// convert Table into an append stream and print it
		// (if instead we needed a retraction stream we would use tEnv.toRetractStream)
		tEnv.toAppendStream(results, Row.class).print();

		// execute query
		env.execute();
	}
 
Developer: dataArtisans, Project: flink-training-exercises, Lines: 58, Source file: PopularPlacesSql.java

Example 13: main

import org.apache.flink.table.api.java.StreamTableEnvironment; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

		// read parameters
		ParameterTool params = ParameterTool.fromArgs(args);
		String input = params.getRequired("input");

		final int maxEventDelay = 60;       // events are out of order by max 60 seconds
		final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

		// set up streaming execution environment
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		// create a TableEnvironment
		StreamTableEnvironment tEnv = TableEnvironment.getTableEnvironment(env);

		// register TaxiRideTableSource as table "TaxiRides"
		tEnv.registerTableSource(
				"TaxiRides",
				new TaxiRideTableSource(
						input,
						maxEventDelay,
						servingSpeedFactor));

		// register user-defined functions
		tEnv.registerFunction("isInNYC", new GeoUtils.IsInNYC());
		tEnv.registerFunction("toCellId", new GeoUtils.ToCellId());
		tEnv.registerFunction("toCoords", new GeoUtils.ToCoords());

		Table popPlaces = tEnv
				// scan TaxiRides table
				.scan("TaxiRides")
				// filter for valid rides
				.filter("isInNYC(startLon, startLat) && isInNYC(endLon, endLat)")
				// select fields and compute grid cell of departure or arrival coordinates
				.select("eventTime, " +
						"isStart, " +
						"(isStart = true).?(toCellId(startLon, startLat), toCellId(endLon, endLat)) AS cell")
				// specify sliding window over 15 minutes with slide of 5 minutes
				.window(Slide.over("15.minutes").every("5.minutes").on("eventTime").as("w"))
				// group by cell, isStart, and window
				.groupBy("cell, isStart, w")
				// count departures and arrivals per cell (location) and window (time)
				.select("cell, isStart, w.start AS start, w.end AS end, count(isStart) AS popCnt")
				// filter for popular places
				.filter("popCnt > 20")
				// convert cell back to coordinates
				.select("toCoords(cell) AS location, start, end, isStart, popCnt");

		// convert Table into an append stream and print it
		tEnv.toAppendStream(popPlaces, Row.class).print();

		// execute query
		env.execute();
	}
 
Developer: dataArtisans, Project: flink-training-exercises, Lines: 56, Source file: PopularPlacesTableApi.java


Note: The org.apache.flink.table.api.java.StreamTableEnvironment.registerTableSource examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce this article without permission.