

Java DataStream.addSink Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.addSink. If you are wondering how DataStream.addSink is used in practice, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.datastream.DataStream.


The following 15 code examples demonstrate DataStream.addSink, sorted by popularity by default.
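Before the individual examples, here is a minimal, self-contained sketch of the method's role: addSink attaches a SinkFunction as the terminal operator of a stream, and the pipeline only runs once execute() is called. The class name, sink, and element values below are illustrative placeholders, not taken from any of the examples that follow.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;

public class AddSinkSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<String> stream = env.fromElements("a", "b", "c");

		// addSink terminates the stream with a user-defined sink;
		// the job graph is only executed when env.execute() is called.
		stream.addSink(new SinkFunction<String>() {
			private static final long serialVersionUID = 1L;

			@Override
			public void invoke(String value) throws Exception {
				System.out.println(value); // illustrative side effect only
			}
		});

		env.execute("addSink sketch");
	}
}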

Example 1: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("zookeeper.connect", "localhost:2181");
	properties.setProperty("group.id", "test");
	properties.setProperty("auto.offset.reset", "latest");
	FlinkKafkaConsumer08<DeviceEvent> flinkKafkaConsumer08 = new FlinkKafkaConsumer08<>("device-data",
			new DeviceSchema(), properties);

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<DeviceEvent> messageStream = env.addSource(flinkKafkaConsumer08);
	
	Map<String, String> config = new HashMap<>();
	config.put("cluster.name", "my-application");
	// This instructs the sink to emit after every element, otherwise they would be buffered
	config.put("bulk.flush.max.actions", "1");

	List<InetSocketAddress> transportAddresses = new ArrayList<>();
	transportAddresses.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));

	messageStream.addSink(new ElasticsearchSink<DeviceEvent>(config, transportAddresses, new ESSink()));
	env.execute();
}
 
Developer: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 24, Source: FlinkESConnector.java

Example 2: testFilter

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
public void testFilter() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple5<Integer, Long, Integer, String, Long>> ds = JavaStreamTestData.get5TupleDataStream(env);
	tableEnv.registerDataStream("MyTable", ds, "a, b, c, d, e");

	String sqlQuery = "SELECT a, b, e FROM MyTable WHERE c < 4";
	Table result = tableEnv.sqlQuery(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,1");
	expected.add("2,2,2");
	expected.add("2,3,1");
	expected.add("3,4,2");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: JavaSqlITCase.java

Example 3: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	ParameterTool parameterTool = ParameterTool.fromArgs(args);
	if (parameterTool.getNumberOfParameters() < 2) {
		System.out.println("Missing parameters!");
		System.out.println("Usage: Kafka --topic <topic> --bootstrap.servers <kafka brokers>");
		return;
	}

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();
	env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));

	// very simple data generator
	DataStream<String> messageStream = env.addSource(new SourceFunction<String>() {
		private static final long serialVersionUID = 6369260445318862378L;
		private volatile boolean running = true; // volatile: cancel() is called from a different thread

		@Override
		public void run(SourceContext<String> ctx) throws Exception {
			long i = 0;
			while (this.running) {
				ctx.collect("Element - " + i++);
				Thread.sleep(500);
			}
		}

		@Override
		public void cancel() {
			running = false;
		}
	});

	// write data into Kafka
	messageStream.addSink(new FlinkKafkaProducer08<>(parameterTool.getRequired("topic"), new SimpleStringSchema(), parameterTool.getProperties()));

	env.execute("Write into Kafka example");
}
 
Developer: axbaretto, Project: flink, Lines: 38, Source: WriteIntoKafka.java

Example 4: testSelect

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
public void testSelect() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple3<Integer, Long, String>> ds = StreamTestData.getSmall3TupleDataSet(env);
	Table in = tableEnv.fromDataStream(ds, "a,b,c");
	tableEnv.registerTable("MyTable", in);

	String sqlQuery = "SELECT * FROM MyTable";
	Table result = tableEnv.sql(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toDataStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,Hi");
	expected.add("2,2,Hello");
	expected.add("3,2,Hello world");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: SqlITCase.java

Example 5: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	env.enableCheckpointing(20);
	env.setParallelism(12);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: BucketingSinkFaultToleranceITCase.java

Example 6: testSelect

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
public void testSelect() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
	StreamITCase.clear();

	DataStream<Tuple3<Integer, Long, String>> ds = StreamTestData.getSmall3TupleDataSet(env);
	Table in = tableEnv.fromDataStream(ds, "a,b,c");
	tableEnv.registerTable("MyTable", in);

	String sqlQuery = "SELECT * FROM MyTable";
	Table result = tableEnv.sql(sqlQuery);

	DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
	resultSet.addSink(new StreamITCase.StringSink<Row>());
	env.execute();

	List<String> expected = new ArrayList<>();
	expected.add("1,1,Hi");
	expected.add("2,2,Hello");
	expected.add("3,2,Hello world");

	StreamITCase.compareWithList(expected);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: SqlITCase.java

Example 7: shouldSelectFromStreamUsingAnonymousClassSelect

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
@SuppressWarnings("Convert2Lambda")
public void shouldSelectFromStreamUsingAnonymousClassSelect() throws Exception {
    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    DataStream<TestEvent> dataStream = executionEnvironment.fromElements(new TestEvent("peter", 10), new TestEvent("alex", 25), new TestEvent("maria", 30));

    EsperStream<TestEvent> esperStream = Esper.query(dataStream, "select name, age from TestEvent");

    DataStream<TestEvent> resultStream = esperStream.select(new EsperSelectFunction<TestEvent>() {
        private static final long serialVersionUID = 8802852465465541287L;

        @Override
        public TestEvent select(EventBean eventBean) throws Exception {
            String name = (String) eventBean.get("name");
            int age = (int) eventBean.get("age");
            return new TestEvent(name, age);
        }
    });

    resultStream.addSink(new SinkFunction<TestEvent>() {

        private static final long serialVersionUID = -8260794084029816089L;

        @Override
        public void invoke(TestEvent testEvent) throws Exception {
            System.err.println(testEvent);
            result.add(testEvent);
        }
    });

    executionEnvironment.execute("test-2");

    assertThat(result, is(notNullValue()));
    assertThat(result.size(), is(3));
}
 
Developer: phil3k3, Project: flink-esper, Lines: 38, Source: EsperQueryTest.java

Example 8: shouldSelectFromStreamUsingLambdaSelect

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
@SuppressWarnings("Convert2Lambda")
public void shouldSelectFromStreamUsingLambdaSelect() throws Exception {

    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    DataStream<TestEvent> dataStream = executionEnvironment.fromElements(new TestEvent("peter1", 10), new TestEvent("alex1", 25), new TestEvent("maria1", 30));

    EsperStream<TestEvent> esperStream = Esper.query(dataStream, "select name, age from TestEvent");

    DataStream<TestEvent> resultStream = esperStream.select((EsperSelectFunction<TestEvent>) collector -> {
        String name = (String) collector.get("name");
        int age = (int) collector.get("age");
        return new TestEvent(name, age);
    });

    resultStream.addSink(new SinkFunction<TestEvent>() {

        private static final long serialVersionUID = 5588530728493738002L;

        @Override
        public void invoke(TestEvent testEvent) throws Exception {
            result.add(testEvent);
        }
    });

    executionEnvironment.execute("test-1");

    assertThat(result, is(notNullValue()));
    assertThat(result.size(), is(3));
}
 
Developer: phil3k3, Project: flink-esper, Lines: 33, Source: EsperQueryTest.java

Example 9: shouldSelectFromStringDataStream

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
@SuppressWarnings("Convert2Lambda")
public void shouldSelectFromStringDataStream() throws Exception {
    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    List<String> expectedValues = Arrays.asList("first", "second");
    DataStream<String> dataStream = executionEnvironment.fromCollection(expectedValues);

    EsperStream<String> esperStream = Esper.query(dataStream, "select bytes from String");

    DataStream<String> resultStream = esperStream.select((EsperSelectFunction<String>) collector -> {
        byte[] bytes = (byte[]) collector.get("bytes");
        return new String(bytes);
    });

    resultStream.addSink(new SinkFunction<String>() {

        private static final long serialVersionUID = 284955963055337762L;

        @Override
        public void invoke(String testEvent) throws Exception {
            System.err.println(testEvent);
            stringResult.add(testEvent);
        }
    });

    executionEnvironment.execute("test-2");

    assertThat(stringResult, is(notNullValue()));
    assertThat(stringResult.size(), is(2));
    assertThat(stringResult, is(expectedValues));
}
 
Developer: phil3k3, Project: flink-esper, Lines: 34, Source: EsperQueryTest.java

Example 10: testEsperPattern

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
public void testEsperPattern() throws Exception {
    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    List<ComplexEvent> expectedValues = Lists.newArrayList();
    ComplexEvent complexEvent = new ComplexEvent(Event.start(), Event.end());
    expectedValues.add(complexEvent);

    List<Event> events = Arrays.asList(complexEvent.getStartEvent(), complexEvent.getEndEvent());
    DataStream<Event> dataStream = executionEnvironment.fromCollection(events);

    EsperStream<Event> esperStream = Esper.pattern(dataStream, "every (A=Event(type='start') -> B=Event(type='end'))");

    DataStream<ComplexEvent> complexEventDataStream = esperStream.select(new EsperSelectFunction<ComplexEvent>() {
        @Override
        public ComplexEvent select(EventBean eventBean) throws Exception {
            return new ComplexEvent((Event) eventBean.get("A"), (Event) eventBean.get("B"));
        }
    });

    complexEventDataStream.addSink(new SinkFunction<ComplexEvent>() {
        @Override
        public void invoke(ComplexEvent value) throws Exception {
            System.err.println(value);
            resultingEvents.add(value);
        }
    });

    executionEnvironment.execute("test-2");

    assertThat(resultingEvents, is(expectedValues));
}
 
Developer: phil3k3, Project: flink-esper, Lines: 34, Source: EsperPatternTest.java

Example 11: emitDataStream

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
/**
 * NOTE: This method is for internal use only for defining a TableSink.
 *       Do not use it in Table API programs.
 */
@Override
public void emitDataStream(DataStream<Row> dataStream) {
    checkState(fieldNames != null, "Table sink is not configured");
    checkState(fieldTypes != null, "Table sink is not configured");
    checkState(serializationSchema != null, "Table sink is not configured");
    checkState(eventRouter != null, "Table sink is not configured");

    FlinkPravegaWriter<Row> writer = createFlinkPravegaWriter();
    dataStream.addSink(writer);
}
 
Developer: pravega, Project: flink-connectors, Lines: 15, Source: FlinkPravegaTableSink.java

Example 12: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(3);

        DataStream<String> simpleStringStream = env.addSource(new EventsGenerator());

        Properties configProps = new Properties();
        configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
        configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
        configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
        configProps.put(ConfigConstants.LOG_PROJECT, sProject);
        configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);

        FlinkLogProducer<String> logProducer = new FlinkLogProducer<String>(new SimpleLogSerializer(), configProps);
        logProducer.setCustomPartitioner(new LogPartitioner<String>() {
            @Override
            public String getHashKey(String element) {
                try {
                    MessageDigest md = MessageDigest.getInstance("MD5");
                    md.update(element.getBytes());
                    String hash = new BigInteger(1, md.digest()).toString(16);
                    // left-pad to the full 32 hex digits of an MD5 digest
                    while (hash.length() < 32) {
                        hash = "0" + hash;
                    }
                    return hash;
                } catch (NoSuchAlgorithmException e) {
                    // MD5 is mandated by the JDK, so this should never happen;
                    // fall through to a constant hash key
                }
                return "0000000000000000000000000000000000000000000000000000000000000000";
            }
        });
        simpleStringStream.addSink(logProducer);

        env.execute("flink log producer");
    }
 
Developer: aliyun, Project: aliyun-log-flink-connector, Lines: 36, Source: ProducerSample.java

Example 13: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

		TaxiRideCleansingParameterParser params = new TaxiRideCleansingParameterParser();
		// TODO: refactor this method
		if (!params.parseParams(args)) {
			final String dataFilePath = params.getDataFilePath();

			// get an ExecutionEnvironment
			StreamExecutionEnvironment env =
					StreamExecutionEnvironment.getExecutionEnvironment();
			// configure event-time processing
			env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

			// get the taxi ride data stream
			DataStream<TaxiRide> rides = env.addSource(
					new TaxiRideSource(dataFilePath, MAX_EVENT_DELAY_DEFAULT, SERVING_SPEED_FACTOR_DEFAULT));

			TaxiRideCleansing taxiRideCleansing = new TaxiRideCleansing();
			DataStream<TaxiRide> filteredRides = taxiRideCleansing.execute(rides);

			filteredRides.addSink(new FlinkKafkaProducer010<>(
					"localhost:9092",      // Kafka broker host:port
					"cleansedRides",       // Topic to write to
					new TaxiRideSchema())  // Serializer (provided as util)
			);

//			filteredRides.print();
			env.execute("Running Taxi Ride Cleansing");
		}
	}
 
Developer: dineshtrivedi, Project: flink-java-project, Lines: 31, Source: TaxiRideCleansingRunner.java

Example 14: dummyTest

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
	public void dummyTest() throws Exception {
		DateTime now = new DateTime();
		Collection<TaxiRide> taxiRides = new ArrayList<>();
		TaxiRide taxiRideNYC_1 = new TaxiRide(1, true, now, now, (float)GeoUtils.LON_EAST,
				(float)GeoUtils.LAT_NORTH, (float)GeoUtils.LON_WEST, (float)GeoUtils.LAT_SOUTH, (short)3);
		taxiRides.add(taxiRideNYC_1);

		TaxiRide taxiRideNYC_2 = new TaxiRide(2, true, now, now, (float)GeoUtils.LON_EAST,
				(float)GeoUtils.LAT_NORTH, (float)GeoUtils.LON_WEST, (float)GeoUtils.LAT_SOUTH, (short)3);
		taxiRides.add(taxiRideNYC_2);

		TaxiRide taxiRideNotInNYC_1 = new TaxiRide(2, true, now, now, (float)GeoUtils.LON_EAST + 1,
				(float)GeoUtils.LAT_NORTH, (float)GeoUtils.LON_WEST, (float)GeoUtils.LAT_SOUTH, (short)3);
		taxiRides.add(taxiRideNotInNYC_1);

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(1);
		DataStream<TaxiRide> rides = env.fromCollection(taxiRides);

		TaxiRideCleansing taxiRideCleansing = new TaxiRideCleansing();

		DataStream<TaxiRide> filteredRides = taxiRideCleansing.execute(rides);

		Collection<TaxiRide> RESULTS = new ArrayList<>();
		// Collect the filtered rides into RESULTS through a sink so they can be inspected after the job runs:
		filteredRides.addSink(new ResultsSinkFunction(RESULTS));

		env.execute("Running Taxi Ride Cleansing");

//		Assert.assertEquals(2, RESULTS.size());
		Assert.assertTrue(true);
	}
 
Developer: dineshtrivedi, Project: flink-java-project, Lines: 34, Source: TaxiRideCleansingTest.java

Example 15: standardReadWriteSimulator

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public void standardReadWriteSimulator(final StreamId inStreamId, final StreamId outStreamId, final StreamUtils streamUtils, int numElements) throws Exception {

		final int checkpointInterval = 100;
		final int taskFailureRestartAttempts = 1;
		final long delayBetweenRestartAttempts = 0L;
		final long startTime = 0L;
		final String jobName = "standardReadWriteSimulator";

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(parallelism);
		env.enableCheckpointing(checkpointInterval);
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(taskFailureRestartAttempts, delayBetweenRestartAttempts));

		// the Pravega reader
		final FlinkPravegaReader<Integer> pravegaSource = streamUtils.getFlinkPravegaParams().newReader(inStreamId, startTime, Integer.class);

		// Pravega Writer
		FlinkPravegaWriter<Integer> pravegaWriter = streamUtils.getFlinkPravegaParams().newWriter(outStreamId, Integer.class, new IdentityRouter<>());
		pravegaWriter.setPravegaWriterMode(PravegaWriterMode.ATLEAST_ONCE);

		DataStream<Integer> stream = env.addSource(pravegaSource).map(new IdentityMapper<>());

		stream.addSink(pravegaWriter);

		stream.addSink(new IntSequenceExactlyOnceValidator(numElements));

		env.execute(jobName);

	}
 
Developer: pravega, Project: nautilus-samples, Lines: 30, Source: EventCounterApp.java


Note: The org.apache.flink.streaming.api.datastream.DataStream.addSink examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code. Do not reproduce this article without permission.