This article collects typical usages of the Java method org.apache.flink.streaming.api.datastream.DataStreamSource.addSink. If you are wondering what DataStreamSource.addSink does, how to use it, or where to find examples of it, the curated method examples below should help; you can also read more about the enclosing class, org.apache.flink.streaming.api.datastream.DataStreamSource.
The following 15 code examples of DataStreamSource.addSink are shown, sorted by popularity by default.
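Before the examples, here is a minimal, self-contained sketch of the pattern they all follow: addSource produces a DataStreamSource, addSink attaches a SinkFunction to it, and execute runs the job. PrintSinkFunction is used here only as a stand-in for the connector-specific sinks in the examples below.

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;

public class AddSinkSkeleton {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // addSource/generateSequence returns a DataStreamSource; addSink terminates
        // this branch of the topology with a sink.
        DataStreamSource<Long> source = env.generateSequence(0, 9);
        source.addSink(new PrintSinkFunction<Long>());
        env.execute("addSink skeleton");
    }
}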
Example 1: testRedisSortedSetDataType
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testRedisSortedSetDataType() throws Exception {
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunctionSortedSet());
    RedisSink<Tuple2<String, String>> redisZaddSink = new RedisSink<>(jedisPoolConfig,
            new RedisAdditionalDataMapper(RedisCommand.ZADD));

    source.addSink(redisZaddSink);
    env.execute("Test ZADD");

    assertEquals(NUM_ELEMENTS, jedis.zcard(REDIS_ADDITIONAL_KEY));

    RedisSink<Tuple2<String, String>> redisZremSink = new RedisSink<>(jedisPoolConfig,
            new RedisAdditionalDataMapper(RedisCommand.ZREM));

    source.addSink(redisZremSink);
    env.execute("Test ZREM");

    assertEquals(ZERO, jedis.zcard(REDIS_ADDITIONAL_KEY));

    jedis.del(REDIS_ADDITIONAL_KEY);
}
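RedisAdditionalDataMapper is a helper defined elsewhere in the test class and not shown on this page. A plausible reconstruction, assuming the flink-connector-redis RedisMapper/RedisCommandDescription API and using a stand-in constant for REDIS_ADDITIONAL_KEY (the sorted-set name), might look like the following; the RedisCommandMapper in later examples would have the same shape but use the single-argument RedisCommandDescription constructor:

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;

// Hypothetical reconstruction of the helper used above.
public class RedisAdditionalDataMapper implements RedisMapper<Tuple2<String, String>> {

    private static final String REDIS_ADDITIONAL_KEY = "additional-key"; // stand-in for the test constant

    private final RedisCommand command;

    public RedisAdditionalDataMapper(RedisCommand command) {
        this.command = command;
    }

    @Override
    public RedisCommandDescription getCommandDescription() {
        // ZADD/ZREM (and HSET) need an additional key: the name of the sorted set or hash.
        return new RedisCommandDescription(command, REDIS_ADDITIONAL_KEY);
    }

    @Override
    public String getKeyFromData(Tuple2<String, String> data) {
        return data.f0; // how f0/f1 map to member and score depends on the test's tuple layout
    }

    @Override
    public String getValueFromData(Tuple2<String, String> data) {
        return data.f1;
    }
}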
Example 2: createProducerTopology
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
private void createProducerTopology(StreamExecutionEnvironment env, AMQSinkConfig<String> config) {
    DataStreamSource<String> stream = env.addSource(new SourceFunction<String>() {
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            for (int i = 0; i < MESSAGES_NUM; i++) {
                ctx.collect("amq-" + i);
            }
        }

        @Override
        public void cancel() {}
    });

    AMQSink<String> sink = new AMQSink<>(config);
    stream.addSink(sink);
}
Example 3: testTransportClientFails
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test(expected = JobExecutionException.class)
public void testTransportClientFails() throws Exception {
    // this checks whether the TransportClient fails early when there is no cluster to
    // connect to. There isn't a similar test for the Node Client version since that
    // one will block and wait for a cluster to come online
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction());

    Map<String, String> config = new HashMap<>();
    // This instructs the sink to emit after every element, otherwise they would be buffered
    config.put(ElasticsearchSink.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
    config.put("cluster.name", "my-node-client-cluster");

    List<InetSocketAddress> transports = new ArrayList<>();
    transports.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));

    source.addSink(new ElasticsearchSink<>(config, transports, new TestElasticsearchSinkFunction()));

    env.execute("Elasticsearch Node Client Test");
}
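TestElasticsearchSinkFunction is likewise defined elsewhere in the test class. A minimal sketch of such a function, assuming the Elasticsearch 2.x connector API and a hypothetical my-index/my-type target:

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.elasticsearch2.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch2.RequestIndexer;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

// Minimal sketch: turns each tuple into a single index request.
public class TestElasticsearchSinkFunction implements ElasticsearchSinkFunction<Tuple2<Integer, String>> {

    @Override
    public void process(Tuple2<Integer, String> element, RuntimeContext ctx, RequestIndexer indexer) {
        Map<String, Object> json = new HashMap<>();
        json.put("data", element.f1);

        IndexRequest request = Requests.indexRequest()
                .index("my-index") // hypothetical index name
                .type("my-type")   // hypothetical type name
                .id(element.f0.toString())
                .source(json);
        indexer.add(request);
    }
}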
Example 4: redisSinkTest
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void redisSinkTest() throws Exception {
    sinkThread.start();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    FlinkJedisPoolConfig jedisPoolConfig = new FlinkJedisPoolConfig.Builder()
            .setHost(REDIS_HOST)
            .setPort(REDIS_PORT).build();
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunction());
    RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig, new RedisTestMapper());

    source.addSink(redisSink);
    env.execute("Redis Sink Test");

    assertEquals(NUM_ELEMENTS, sourceList.size());
}
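TestSourceFunction, used here and in most of the Redis examples, is also not shown on this page. It is presumably a bounded source that emits NUM_ELEMENTS key/value tuples; a sketch under that assumption:

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

// Hypothetical reconstruction: emits a fixed number of (key, value) tuples and finishes.
public class TestSourceFunction implements SourceFunction<Tuple2<String, String>> {

    private static final int NUM_ELEMENTS = 20; // stand-in for the test constant

    @Override
    public void run(SourceContext<Tuple2<String, String>> ctx) throws Exception {
        for (int i = 0; i < NUM_ELEMENTS; i++) {
            ctx.collect(new Tuple2<>("key-" + i, "value-" + i));
        }
    }

    @Override
    public void cancel() {
        // bounded source: run() returns on its own
    }
}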
Example 5: runTransportClientFailsTest
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
/**
 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 */
public void runTransportClientFailsTest() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

    Map<String, String> userConfig = new HashMap<>();
    userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
    userConfig.put("cluster.name", "my-transport-client-cluster");

    source.addSink(createElasticsearchSinkForEmbeddedNode(
            userConfig, new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test")));

    try {
        env.execute("Elasticsearch Transport Client Test");
    } catch (JobExecutionException expectedException) {
        assertTrue(expectedException.getCause().getMessage().contains("not connected to any Elasticsearch nodes"));
        return;
    }

    fail();
}
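createElasticsearchSinkForEmbeddedNode is an abstract factory declared by the shared test base and implemented once per connector version, so its body is not shown here. For the 2.x transport-client connector it plausibly reduces to something like this (the constructor signature is version-specific, so treat this as an assumption):

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.flink.streaming.connectors.elasticsearch2.ElasticsearchSink;
import org.apache.flink.streaming.connectors.elasticsearch2.ElasticsearchSinkFunction;

// Sketch: build a transport-client sink that points at the embedded node on localhost.
private <T> ElasticsearchSink<T> createElasticsearchSinkForEmbeddedNode(
        Map<String, String> userConfig, ElasticsearchSinkFunction<T> sinkFunction) throws Exception {
    List<InetSocketAddress> transports = new ArrayList<>();
    transports.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));
    return new ElasticsearchSink<>(userConfig, transports, sinkFunction);
}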
Example 6: testRedisSetDataType
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testRedisSetDataType() throws Exception {
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunction());
    RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig,
            new RedisCommandMapper(RedisCommand.SADD));

    source.addSink(redisSink);
    env.execute("Test Redis Set Data Type");

    assertEquals(NUM_ELEMENTS, jedis.scard(REDIS_KEY));

    jedis.del(REDIS_KEY);
}
Example 7: testRedisHyperLogLogDataType
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testRedisHyperLogLogDataType() throws Exception {
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunction());
    RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig,
            new RedisCommandMapper(RedisCommand.PFADD));

    source.addSink(redisSink);
    env.execute("Test Redis Hyper Log Log Data Type");

    assertEquals(NUM_ELEMENTS, Long.valueOf(jedis.pfcount(REDIS_KEY)));

    jedis.del(REDIS_KEY);
}
Example 8: runFailOnAutoOffsetResetNone
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
/**
 * Ensures that the consumer fails properly if "auto.offset.reset" is set to "none".
 *
 * @throws Exception
 */
public void runFailOnAutoOffsetResetNone() throws Exception {
    final String topic = "auto-offset-reset-none-test";
    final int parallelism = 1;

    kafkaServer.createTestTopic(topic, parallelism, 1);

    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.createRemoteEnvironment("localhost", flink.getLeaderRPCPort());
    env.setParallelism(parallelism);
    env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
    env.getConfig().disableSysoutLogging();

    // ----------- add consumer ----------

    Properties customProps = new Properties();
    customProps.putAll(standardProps);
    customProps.putAll(secureProps);
    customProps.setProperty("auto.offset.reset", "none"); // test that "none" leads to an exception
    FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps);

    DataStreamSource<String> consuming = env.addSource(source);
    consuming.addSink(new DiscardingSink<String>());

    try {
        env.execute("Test auto offset reset none");
    } catch (Throwable e) {
        System.out.println("MESSAGE: " + e.getCause().getCause().getMessage());
        // check that the correct exception has been thrown
        if (!e.getCause().getCause().getMessage().contains("Unable to find previous offset") // kafka 0.8
                && !e.getCause().getCause().getMessage().contains("Undefined offset with no reset policy for partition") // kafka 0.9
        ) {
            throw e;
        }
    }

    kafkaServer.deleteTestTopic(topic);
}
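The double getCause() unwrapping above is brittle. On Flink versions that ship ExceptionUtils.findThrowableWithMessage (an assumption; check your version), the same check can search the whole cause chain, e.g. via a hypothetical helper like this:

import java.util.Optional;

import org.apache.flink.util.ExceptionUtils;

// Hypothetical helper: search the entire cause chain instead of unwrapping getCause() twice.
static boolean isExpectedOffsetResetFailure(Throwable e) {
    Optional<Throwable> kafka08 = ExceptionUtils.findThrowableWithMessage(e, "Unable to find previous offset");
    Optional<Throwable> kafka09 = ExceptionUtils.findThrowableWithMessage(e, "Undefined offset with no reset policy for partition");
    return kafka08.isPresent() || kafka09.isPresent();
}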
Example 9: getFunctionFromDataSource
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private static <T> SourceFunction<T> getFunctionFromDataSource(DataStreamSource<T> dataStreamSource) {
    // attach a throwaway sink so the topology is complete before inspecting it
    dataStreamSource.addSink(new DiscardingSink<T>());
    AbstractUdfStreamOperator<?, ?> operator =
            (AbstractUdfStreamOperator<?, ?>) getOperatorFromDataStream(dataStreamSource);
    return (SourceFunction<T>) operator.getUserFunction();
}
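The companion helper getOperatorFromDataStream is not shown. In Flink's own tests it is typically implemented by looking the stream's node up in the generated StreamGraph, roughly as below (the APIs involved are version-dependent); this also explains the addSink(new DiscardingSink<T>()) call above, since without a sink the source never makes it into the graph:

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.streaming.api.operators.StreamOperator;

// Sketch of the companion helper: resolve the DataStream's node in the StreamGraph.
private static StreamOperator<?> getOperatorFromDataStream(DataStream<?> dataStream) {
    StreamExecutionEnvironment env = dataStream.getExecutionEnvironment();
    StreamGraph streamGraph = env.getStreamGraph();
    return streamGraph.getStreamNode(dataStream.getId()).getOperator();
}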
Example 10: testRedisListDataType
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testRedisListDataType() throws Exception {
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunction());
    RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig,
            new RedisCommandMapper(RedisCommand.LPUSH));

    source.addSink(redisSink);
    env.execute("Test Redis List Data Type");

    assertEquals(NUM_ELEMENTS, jedis.llen(REDIS_KEY));

    jedis.del(REDIS_KEY);
}
Example 11: testRedisHyperLogLogDataType
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testRedisHyperLogLogDataType() throws Exception {
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunction());
    RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig,
            new RedisCommandMapper(RedisCommand.PFADD));

    source.addSink(redisSink);
    env.execute("Test Redis Hyper Log Log Data Type");

    assertEquals(NUM_ELEMENTS, Long.valueOf(jedis.pfcount(REDIS_KEY)));

    jedis.del(REDIS_KEY);
}
Example 12: testCassandraPojoAtLeastOnceSink
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testCassandraPojoAtLeastOnceSink() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    DataStreamSource<Pojo> source = env
            .addSource(new SourceFunction<Pojo>() {

                private boolean running = true;
                private volatile int cnt = 0;

                @Override
                public void run(SourceContext<Pojo> ctx) throws Exception {
                    while (running) {
                        ctx.collect(new Pojo(UUID.randomUUID().toString(), cnt, 0));
                        cnt++;
                        if (cnt == 20) {
                            cancel();
                        }
                    }
                }

                @Override
                public void cancel() {
                    running = false;
                }
            });

    source.addSink(new CassandraPojoSink<>(Pojo.class, builder));

    env.execute();

    ResultSet rs = session.execute(SELECT_DATA_QUERY);
    Assert.assertEquals(20, rs.all().size());
}
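CassandraPojoSink relies on the DataStax object mapper, so the Pojo class used above must carry mapping annotations. A sketch of what it plausibly looks like, with placeholder keyspace, table, and column names:

import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.Table;

// Hypothetical reconstruction; the real test defines its own keyspace/table and accessors.
@Table(keyspace = "flink", name = "test")
public class Pojo {

    @Column(name = "id")
    private String id;

    @Column(name = "counter")
    private int counter;

    @Column(name = "batch_id")
    private int batchId;

    public Pojo() {
        // the mapper requires a no-arg constructor
    }

    public Pojo(String id, int counter, int batchId) {
        this.id = id;
        this.counter = counter;
        this.batchId = batchId;
    }

    // getters and setters omitted for brevity
}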
Example 13: testRedisHashDataType
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testRedisHashDataType() throws Exception {
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunctionHash());
    RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig,
            new RedisAdditionalDataMapper(RedisCommand.HSET));

    source.addSink(redisSink);
    env.execute("Test Redis Hash Data Type");

    assertEquals(NUM_ELEMENTS, jedis.hlen(REDIS_ADDITIONAL_KEY));

    jedis.del(REDIS_ADDITIONAL_KEY);
}
Example 14: runFailOnAutoOffsetResetNone
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
/**
 * Ensures that the consumer fails properly if "auto.offset.reset" is set to "none".
 *
 * @throws Exception
 */
public void runFailOnAutoOffsetResetNone() throws Exception {
    final String topic = "auto-offset-reset-none-test";
    final int parallelism = 1;

    kafkaServer.createTestTopic(topic, parallelism, 1);

    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.createRemoteEnvironment("localhost", flink.getLeaderRPCPort());
    env.setParallelism(parallelism);
    env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
    env.getConfig().disableSysoutLogging();

    // ----------- add consumer ----------

    Properties customProps = new Properties();
    customProps.putAll(standardProps);
    customProps.putAll(secureProps);
    customProps.setProperty("auto.offset.reset", "none"); // test that "none" leads to an exception
    FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps);

    DataStreamSource<String> consuming = env.addSource(source);
    consuming.addSink(new DiscardingSink<String>());

    try {
        env.execute("Test auto offset reset none");
    } catch (Throwable e) {
        // check that the correct exception has been thrown
        if (!e.getCause().getCause().getMessage().contains("Unable to find previous offset") // kafka 0.8
                && !e.getCause().getCause().getMessage().contains("Undefined offset with no reset policy for partition") // kafka 0.9
        ) {
            throw e;
        }
    }

    kafkaServer.deleteTestTopic(topic);
}
Example 15: testSources
import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the package/class the method depends on
@Test
public void testSources() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    SourceFunction<Integer> srcFun = new SourceFunction<Integer>() {
        private static final long serialVersionUID = 1L;

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    };
    DataStreamSource<Integer> src1 = env.addSource(srcFun);
    src1.addSink(new DiscardingSink<Integer>());
    assertEquals(srcFun, getFunctionFromDataSource(src1));

    List<Long> list = Arrays.asList(0L, 1L, 2L);

    DataStreamSource<Long> src2 = env.generateSequence(0, 2);
    assertTrue(getFunctionFromDataSource(src2) instanceof StatefulSequenceSource);

    DataStreamSource<Long> src3 = env.fromElements(0L, 1L, 2L);
    assertTrue(getFunctionFromDataSource(src3) instanceof FromElementsFunction);

    DataStreamSource<Long> src4 = env.fromCollection(list);
    assertTrue(getFunctionFromDataSource(src4) instanceof FromElementsFunction);
}