

Java DataStreamSource Class Code Examples

This article collects typical usage examples of the Java class org.apache.flink.streaming.api.datastream.DataStreamSource. If you are wondering what DataStreamSource is for, how to use it, or where to find working examples, the curated snippets below should help.


The DataStreamSource class belongs to the org.apache.flink.streaming.api.datastream package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.

Example 1: main

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();

    Properties properties = new Properties();
    properties.load(new FileInputStream("src/main/resources/application.properties"));

    Properties mqttProperties = new Properties();

    // client id = a:<Organization_ID>:<App_Id>
    mqttProperties.setProperty(MQTTSource.CLIENT_ID,
            String.format("a:%s:%s",
                    properties.getProperty("Org_ID"),
                    properties.getProperty("App_Id")));

    // mqtt server url = tcp://<Org_ID>.messaging.internetofthings.ibmcloud.com:1883
    mqttProperties.setProperty(MQTTSource.URL,
            String.format("tcp://%s.messaging.internetofthings.ibmcloud.com:1883",
                    properties.getProperty("Org_ID")));

    // topic = iot-2/type/<Device_Type>/id/<Device_ID>/evt/<Event_Id>/fmt/json
    mqttProperties.setProperty(MQTTSource.TOPIC,
            String.format("iot-2/type/%s/id/%s/evt/%s/fmt/json",
                    properties.getProperty("Device_Type"),
                    properties.getProperty("Device_ID"),
                    properties.getProperty("EVENT_ID")));

    mqttProperties.setProperty(MQTTSource.USERNAME, properties.getProperty("API_Key"));
    mqttProperties.setProperty(MQTTSource.PASSWORD, properties.getProperty("APP_Authentication_Token"));


    MQTTSource mqttSource = new MQTTSource(mqttProperties);
    DataStreamSource<String> temperatureDataSource = env.addSource(mqttSource);
    DataStream<String> stream = temperatureDataSource.map((MapFunction<String, String>) s -> s);
    stream.print();

    env.execute("Temperature Analysis");
}
 
Author: pkhanal, Project: flink-watson-iot-connector, Lines: 38, Source: DeviceDataAnalysis.java
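
The snippet reads its IBM Watson IoT Platform credentials from application.properties. As a point of reference, a hypothetical file matching the keys the code reads could look like the sketch below; every value is a placeholder rather than a real credential, and only the key names are taken from the code above.

# Watson IoT Platform connection settings (placeholder values)
Org_ID=myorg12
App_Id=temperature-analysis-app
Device_Type=raspberrypi
Device_ID=sensor-001
EVENT_ID=temperature
API_Key=a-myorg12-xxxxxxxxxx
APP_Authentication_Token=xxxxxxxxxxxxxxxx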

Example 2: testEventTimeOrderedWriter

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
@Test
public void testEventTimeOrderedWriter() throws Exception {
    StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.createLocalEnvironment();

    String streamName = "testEventTimeOrderedWriter";
    SETUP_UTILS.createTestStream(streamName, 1);

    DataStreamSource<Integer> dataStream = execEnv
            .addSource(new IntegerGeneratingSource(false, EVENT_COUNT_PER_SOURCE));

    FlinkPravegaWriter<Integer> pravegaSink = new FlinkPravegaWriter<>(
            SETUP_UTILS.getControllerUri(),
            SETUP_UTILS.getScope(),
            streamName,
            new IntSerializer(),
            event -> "fixedkey");

    FlinkPravegaUtils.writeToPravegaInEventTimeOrder(dataStream, pravegaSink, 1);
    Assert.assertNotNull(execEnv.getExecutionPlan());
}
 
Author: pravega, Project: flink-connectors, Lines: 21, Source: FlinkPravegaWriterITCase.java

Example 3: redisSinkTest

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
@Test
public void redisSinkTest() throws Exception {
    sinkThread.start();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    FlinkJedisPoolConfig jedisPoolConfig = new FlinkJedisPoolConfig.Builder()
        .setHost(REDIS_HOST)
        .setPort(REDIS_PORT).build();
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunction());

    RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig, new RedisTestMapper());

    source.addSink(redisSink);

    env.execute("Redis Sink Test");

    assertEquals(NUM_ELEMENTS, sourceList.size());
}
 
Author: apache, Project: bahir-flink, Lines: 18, Source: RedisSinkPublishITCase.java
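
The test relies on a helper, RedisTestMapper, defined elsewhere in the test class. A minimal sketch of what such a mapper could look like, assuming the bahir-flink RedisMapper interface and a PUBLISH command where f0 carries the channel and f1 the message (the channel-in-f0 convention is an assumption, not the original test code):

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;

// Hypothetical sketch: maps Tuple2 records to Redis PUBLISH operations.
class RedisTestMapper implements RedisMapper<Tuple2<String, String>> {

    @Override
    public RedisCommandDescription getCommandDescription() {
        // PUBLISH needs no additional key; the channel comes from the data itself
        return new RedisCommandDescription(RedisCommand.PUBLISH);
    }

    @Override
    public String getKeyFromData(Tuple2<String, String> data) {
        return data.f0; // interpreted as the channel for PUBLISH
    }

    @Override
    public String getValueFromData(Tuple2<String, String> data) {
        return data.f1; // the message payload
    }
}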

Example 4: testRedisSortedSetDataType

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
@Test
public void testRedisSortedSetDataType() throws Exception {
    DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunctionSortedSet());
    RedisSink<Tuple2<String, String>> redisZaddSink = new RedisSink<>(jedisPoolConfig,
        new RedisAdditionalDataMapper(RedisCommand.ZADD));

    source.addSink(redisZaddSink);
    env.execute("Test ZADD");

    assertEquals(NUM_ELEMENTS, jedis.zcard(REDIS_ADDITIONAL_KEY));

    RedisSink<Tuple2<String, String>> redisZremSink = new RedisSink<>(jedisPoolConfig,
            new RedisAdditionalDataMapper(RedisCommand.ZREM));

    source.addSink(redisZremSink);
    env.execute("Test ZREM");

    assertEquals(ZERO, jedis.zcard(REDIS_ADDITIONAL_KEY));

    jedis.del(REDIS_ADDITIONAL_KEY);
}
 
Author: apache, Project: bahir-flink, Lines: 22, Source: RedisSinkITCase.java

Example 5: createProducerTopology

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
private void createProducerTopology(StreamExecutionEnvironment env, AMQSinkConfig<String> config) {
    DataStreamSource<String> stream = env.addSource(new SourceFunction<String>() {
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            for (int i = 0; i < MESSAGES_NUM; i++) {
                ctx.collect("amq-" + i);
            }
        }

        @Override
        public void cancel() {}
    });


    AMQSink<String> sink = new AMQSink<>(config);
    stream.addSink(sink);
}
 
Author: apache, Project: bahir-flink, Lines: 18, Source: ActiveMQConnectorITCase.java
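
For context, the AMQSinkConfig passed into this method is built elsewhere in the test. A rough sketch of how it might be assembled, assuming the bahir-flink builder exposes setters for the connection factory, destination name, and serialization schema (treat the exact builder type and method names as assumptions rather than verified API):

import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.activemq.AMQSinkConfig;

// Hypothetical configuration sketch for the ActiveMQ sink.
ActiveMQConnectionFactory connectionFactory =
        new ActiveMQConnectionFactory("vm://localhost?broker.persistent=false");

AMQSinkConfig<String> config = new AMQSinkConfig.AMQSinkConfigBuilder<String>()
        .setConnectionFactory(connectionFactory)
        .setDestinationName("queue")
        .setSerializationSchema(new SimpleStringSchema())
        .build();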

Example 6: main

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	DataStreamSource<Tuple2<String, Integer>> source = env.fromCollection(collection);

	CassandraSink.addSink(source)
		.setQuery(INSERT)
		.setClusterBuilder(new ClusterBuilder() {
			@Override
			protected Cluster buildCluster(Builder builder) {
				return builder.addContactPoint("127.0.0.1").build();
			}
		})
		.build();

	env.execute("WriteTupleIntoCassandra");
}
 
Author: axbaretto, Project: flink, Lines: 18, Source: CassandraTupleSinkExample.java
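
Example 6 references two static fields, INSERT and collection, defined elsewhere in the example class. A plausible definition consistent with the Tuple2<String, Integer> element type follows; the keyspace, table, column names, and element count are assumptions.

import java.util.ArrayList;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical field definitions inside the example class.
private static final String INSERT =
        "INSERT INTO test.writetuple (element1, element2) VALUES (?, ?)";

private static final ArrayList<Tuple2<String, Integer>> collection = new ArrayList<>(20);

static {
    for (int i = 0; i < 20; i++) {
        collection.add(new Tuple2<>("cassandra-" + i, i));
    }
}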

Example 7: main

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	DataStreamSource<Message> source = env.fromCollection(messages);

	CassandraSink.addSink(source)
		.setClusterBuilder(new ClusterBuilder() {
			@Override
			protected Cluster buildCluster(Builder builder) {
				return builder.addContactPoint("127.0.0.1").build();
			}
		})
		.setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
		.build();

	env.execute("Cassandra Sink example");
}
 
Author: axbaretto, Project: flink, Lines: 18, Source: CassandraPojoSinkExample.java
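
The Message type consumed here is a plain POJO annotated for the DataStax object mapper, which is what lets CassandraSink persist it without an explicit query. A sketch of what such a class could look like; the keyspace, table, and column names are assumptions.

import java.io.Serializable;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.Table;

// Hypothetical POJO sketch; the annotations map it onto a Cassandra table.
@Table(keyspace = "test", name = "message")
public class Message implements Serializable {

    @Column(name = "body")
    private String body;

    public Message() {
        // the DataStax mapper requires a no-arg constructor
    }

    public Message(String body) {
        this.body = body;
    }

    public String getBody() {
        return body;
    }

    public void setBody(String body) {
        this.body = body;
    }
}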

Example 8: redisSinkTest

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
@Test
public void redisSinkTest() throws Exception {
	sinkThread.start();
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	FlinkJedisPoolConfig jedisPoolConfig = new FlinkJedisPoolConfig.Builder()
		.setHost(REDIS_HOST)
		.setPort(REDIS_PORT).build();
	DataStreamSource<Tuple2<String, String>> source = env.addSource(new TestSourceFunction());

	RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(jedisPoolConfig, new RedisTestMapper());

	source.addSink(redisSink);

	env.execute("Redis Sink Test");

	assertEquals(NUM_ELEMENTS, sourceList.size());
}
 
Author: axbaretto, Project: flink, Lines: 18, Source: RedisSinkPublishITCase.java

Example 9: runTransportClientTest

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
/**
 * Tests that the Elasticsearch sink works properly using a {@link TransportClient}.
 */
public void runTransportClientTest() throws Exception {
	final String index = "transport-client-test-index";

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

	Map<String, String> userConfig = new HashMap<>();
	// This instructs the sink to emit after every element, otherwise they would be buffered
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
	userConfig.put("cluster.name", CLUSTER_NAME);

	source.addSink(createElasticsearchSinkForEmbeddedNode(
		userConfig, new SourceSinkDataTestKit.TestElasticsearchSinkFunction(index)));

	env.execute("Elasticsearch TransportClient Test");

	// verify the results
	Client client = embeddedNodeEnv.getClient();
	SourceSinkDataTestKit.verifyProducedSinkData(client, index);

	client.close();
}
 
Author: axbaretto, Project: flink, Lines: 27, Source: ElasticsearchSinkTestBase.java

Example 10: runTransportClientFailsTest

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
/**
 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 */
public void runTransportClientFailsTest() throws Exception {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

	Map<String, String> userConfig = new HashMap<>();
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
	userConfig.put("cluster.name", "my-transport-client-cluster");

	source.addSink(createElasticsearchSinkForEmbeddedNode(
		userConfig, new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test")));

	try {
		env.execute("Elasticsearch Transport Client Test");
	} catch (JobExecutionException expectedException) {
		assertTrue(expectedException.getCause().getMessage().contains("not connected to any Elasticsearch nodes"));
		return;
	}

	fail();
}
 
Author: axbaretto, Project: flink, Lines: 25, Source: ElasticsearchSinkTestBase.java

Example 11: testKafkaConsumer

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
@Test
@SuppressWarnings("unchecked")
public void testKafkaConsumer() {
	KafkaTableSource.Builder b = getBuilder();
	configureBuilder(b);

	// assert that the correct Kafka consumer and deserialization schema are used
	KafkaTableSource observed = spy(b.build());
	StreamExecutionEnvironment env = mock(StreamExecutionEnvironment.class);
	when(env.addSource(any(SourceFunction.class))).thenReturn(mock(DataStreamSource.class));
	observed.getDataStream(env);

	verify(env).addSource(any(getFlinkKafkaConsumer()));

	verify(observed).getKafkaConsumer(
		eq(TOPIC),
		eq(PROPS),
		any(getDeserializationSchema()));
}
 
Author: axbaretto, Project: flink, Lines: 20, Source: KafkaTableSourceTestBase.java

Example 12: fromElements

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
/**
 * Creates a new data stream that contains the given elements. The elements must all be of the
 * same type, for example, all {@link String} or all {@link Integer}.
 *
 * <p>The framework will try and determine the exact type from the elements. In case of generic
 * elements, it may be necessary to manually supply the type information via
 * {@link #fromCollection(java.util.Collection, org.apache.flink.api.common.typeinfo.TypeInformation)}.
 *
 * <p>Note that this operation will result in a non-parallel data stream source, i.e. a data
 * stream source with a degree of parallelism one.
 *
 * @param data
 * 		The array of elements to create the data stream from.
 * @param <OUT>
 * 		The type of the returned data stream
 * @return The data stream representing the given array of elements
 */
@SafeVarargs
public final <OUT> DataStreamSource<OUT> fromElements(OUT... data) {
	if (data.length == 0) {
		throw new IllegalArgumentException("fromElements needs at least one element as argument");
	}

	TypeInformation<OUT> typeInfo;
	try {
		typeInfo = TypeExtractor.getForObject(data[0]);
	}
	catch (Exception e) {
		throw new RuntimeException("Could not create TypeInformation for type " + data[0].getClass().getName()
				+ "; please specify the TypeInformation manually via "
				+ "StreamExecutionEnvironment#fromElements(Collection, TypeInformation)");
	}
	return fromCollection(Arrays.asList(data), typeInfo);
}
 
Author: axbaretto, Project: flink, Lines: 35, Source: StreamExecutionEnvironment.java
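
As a usage note for Example 12: for non-generic element types the extraction above just works, while generic elements need explicit type information through the fromCollection overload mentioned in the Javadoc. A short sketch (the stream contents are illustrative):

import java.util.Arrays;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

// Simple case: the element type (String) is extracted from the first element.
DataStreamSource<String> words = env.fromElements("flink", "kafka", "redis");

// Generic case: type erasure hides Tuple2's parameters, so supply the
// TypeInformation explicitly via the fromCollection overload.
DataStreamSource<Tuple2<String, Integer>> pairs = env.fromCollection(
        Arrays.asList(Tuple2.of("a", 1), Tuple2.of("b", 2)),
        TypeInformation.of(new TypeHint<Tuple2<String, Integer>>() {}));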

Example 13: fromCollection

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
/**
 * Creates a data stream from the given non-empty collection. The type of the data stream is that of the
 * elements in the collection.
 *
 * <p>The framework will try and determine the exact type from the collection elements. In case of generic
 * elements, it may be necessary to manually supply the type information via
 * {@link #fromCollection(java.util.Collection, org.apache.flink.api.common.typeinfo.TypeInformation)}.
 *
 * <p>Note that this operation will result in a non-parallel data stream source, i.e. a data stream source with
 * parallelism one.
 *
 * @param data
 * 		The collection of elements to create the data stream from.
 * @param <OUT>
 *     The generic type of the returned data stream.
 * @return
 *     The data stream representing the given collection
 */
public <OUT> DataStreamSource<OUT> fromCollection(Collection<OUT> data) {
	Preconditions.checkNotNull(data, "Collection must not be null");
	if (data.isEmpty()) {
		throw new IllegalArgumentException("Collection must not be empty");
	}

	OUT first = data.iterator().next();
	if (first == null) {
		throw new IllegalArgumentException("Collection must not contain null elements");
	}

	TypeInformation<OUT> typeInfo;
	try {
		typeInfo = TypeExtractor.getForObject(first);
	}
	catch (Exception e) {
		throw new RuntimeException("Could not create TypeInformation for type " + first.getClass()
				+ "; please specify the TypeInformation manually via "
				+ "StreamExecutionEnvironment#fromElements(Collection, TypeInformation)");
	}
	return fromCollection(data, typeInfo);
}
 
Author: axbaretto, Project: flink, Lines: 41, Source: StreamExecutionEnvironment.java
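
Since both fromElements and fromCollection yield a source with parallelism one, a common follow-up is to redistribute the elements so that downstream operators can actually run in parallel. A minimal sketch (the map function is a placeholder):

import java.util.Arrays;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

DataStreamSource<Integer> source = env.fromCollection(Arrays.asList(1, 2, 3, 4, 5));

// The source runs with parallelism 1; rebalance() round-robins its elements
// so the map operator downstream receives an even share per subtask.
source.rebalance()
        .map((MapFunction<Integer, Integer>) x -> x * x)
        .print();

env.execute("fromCollection with a parallel downstream");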

Example 14: testTransportClientFails

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
@Test(expected = JobExecutionException.class)
public void testTransportClientFails() throws Exception{
	// this checks whether the TransportClient fails early when there is no cluster to
	// connect to. We don't have such a test for the Node Client version since that
	// one will block and wait for a cluster to come online

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction());

	Map<String, String> config = Maps.newHashMap();
	// This instructs the sink to emit after every element, otherwise they would be buffered
	config.put(ElasticsearchSink.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
	config.put("cluster.name", "my-node-client-cluster");

	// connect to our local node
	config.put("node.local", "true");

	List<TransportAddress> transports = Lists.newArrayList();
	transports.add(new LocalTransportAddress("1"));

	source.addSink(new ElasticsearchSink<>(config, transports, new TestIndexRequestBuilder()));

	env.execute("Elasticsearch Node Client Test");
}
 
Author: axbaretto, Project: flink, Lines: 26, Source: ElasticsearchSinkITCase.java
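
Examples 14 and 15 both rely on a TestSourceFunction defined in the test class. A minimal sketch of such a bounded source follows; the element count and payload format are assumptions, not the original test code.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

// Hypothetical bounded source emitting (index, message) pairs.
class TestSourceFunction implements SourceFunction<Tuple2<Integer, String>> {

    private volatile boolean running = true;

    @Override
    public void run(SourceContext<Tuple2<Integer, String>> ctx) throws Exception {
        for (int i = 0; i < 20 && running; i++) {
            ctx.collect(Tuple2.of(i, "message #" + i));
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}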

Example 15: testTransportClientFails

import org.apache.flink.streaming.api.datastream.DataStreamSource; // import the required package/class
@Test(expected = JobExecutionException.class)
public void testTransportClientFails() throws Exception{
	// this checks whether the TransportClient fails early when there is no cluster to
	// connect to. There isn't a similar test for the Node Client version since that
	// one will block and wait for a cluster to come online

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction());

	Map<String, String> config = new HashMap<>();
	// This instructs the sink to emit after every element, otherwise they would be buffered
	config.put(ElasticsearchSink.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
	config.put("cluster.name", "my-node-client-cluster");

	List<InetSocketAddress> transports = new ArrayList<>();
	transports.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));

	source.addSink(new ElasticsearchSink<>(config, transports, new TestElasticsearchSinkFunction()));

	env.execute("Elasticsearch Node Client Test");
}
 
Author: axbaretto, Project: flink, Lines: 23, Source: ElasticsearchSinkITCase.java


Note: The org.apache.flink.streaming.api.datastream.DataStreamSource examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not repost without permission.