

Java StreamExecutionEnvironment.getExecutionEnvironment Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.getExecutionEnvironment. If you are wondering what StreamExecutionEnvironment.getExecutionEnvironment does, how to call it, or what real-world usages look like, the curated examples below should help. You can also explore further usage examples of its enclosing class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.


Below are 15 code examples of the StreamExecutionEnvironment.getExecutionEnvironment method, sorted by popularity by default.
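Before turning to the collected examples, here is a minimal, self-contained sketch of the pattern every example below shares: obtain the environment, define a pipeline, and call execute(). The class name and job name are illustrative only and do not come from any of the projects listed here.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class GetExecutionEnvironmentSketch {
    public static void main(String[] args) throws Exception {
        // Returns a local environment when run inside an IDE and the cluster
        // environment when the jar is submitted to a running Flink cluster.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // A trivial pipeline: bounded in-memory source -> transformation -> sink.
        DataStream<String> words = env.fromElements("hello", "world");
        words.map(String::toUpperCase).print();

        // Nothing runs until execute() is called; it builds the job graph and submits it.
        env.execute("Minimal getExecutionEnvironment example");
    }
}

The pipeline is built lazily, which is why every example below ends with a call to env.execute().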

Example 1: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);

        // JNDI environment used to look up the Solace JMS connection factory
        final Hashtable<String, String> jmsEnv = new Hashtable<>();
        jmsEnv.put(InitialContext.INITIAL_CONTEXT_FACTORY, "com.solacesystems.jndi.SolJNDIInitialContextFactory");
        jmsEnv.put(InitialContext.PROVIDER_URL, "smf://192.168.56.101");
        jmsEnv.put(Context.SECURITY_PRINCIPAL, "flink_user@flink_vpn"); // placeholder username@message-vpn
        jmsEnv.put(Context.SECURITY_CREDENTIALS, "password");

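        // Consume text messages from JMS queue "flink_queue" via the JNDI connection
        // factory "flink_cf"; JMSQueueSource and JMSTextTranslator are part of the
        // Solace integration guides, not of Flink itself.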
        env.addSource(new JMSQueueSource<String>(jmsEnv,
                "flink_cf",
                "flink_queue",
                new JMSTextTranslator()))
                .print();

        env.execute();
    }
 
Author: SolaceLabs, Project: solace-integration-guides, Lines: 20, Source: BasicQueueStreamingSample.java

Example 2: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    // set up the execution environment
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    Properties props = new Properties();
    props.setProperty(TwitterSource.CONSUMER_KEY, "...");
    props.setProperty(TwitterSource.CONSUMER_SECRET, "...");
    props.setProperty(TwitterSource.TOKEN, "...");
    props.setProperty(TwitterSource.TOKEN_SECRET, "...");

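    // Extract hashtags from the tweet stream, count occurrences per 30-second window,
    // and emit the most frequent hashtag of each window.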
    env.addSource(new TwitterSource(props))
        .flatMap(new ExtractHashTags())
        .keyBy(0)
        .timeWindow(Time.seconds(30))
        .sum(1)
        .filter(new FilterHashTags())
        .timeWindowAll(Time.seconds(30))
        .apply(new GetTopHashTag())
        .print();

    env.execute();
}
 
Author: mushketyk, Project: flink-examples, Lines: 23, Source: TopTweet.java

Example 3: publishUsingFlinkConnector

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private void publishUsingFlinkConnector(AppConfiguration appConfiguration) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	StreamId streamId = getStreamId();
	FlinkPravegaWriter<Event> writer = pravega.newWriter(streamId, Event.class, new EventRouter());

	if(appConfiguration.getProducer().isControlledEnv()) {
		if(!(env instanceof LocalStreamEnvironment)) {
			throw new Exception("Use a local Flink environment or set controlledEnv to false in app.json.");
		}
		// setting parallelism to a single instance, since the controlled run allows user input to trigger error events
		env.setParallelism(1);
		long latency = appConfiguration.getProducer().getLatencyInMilliSec();
		int capacity = appConfiguration.getProducer().getCapacity();
		ControlledSourceContextProducer controlledSourceContextProducer = new ControlledSourceContextProducer(capacity, latency);
		env.addSource(controlledSourceContextProducer).name("EventSource").addSink(writer).name("Pravega-" + streamId.getName());
	} else {
		SourceContextProducer sourceContextProducer = new SourceContextProducer(appConfiguration);
		env.addSource(sourceContextProducer).name("EventSource").addSink(writer).name("Pravega-" + streamId.getName());
	}

	env.execute(appConfiguration.getName()+"-producer");
}
 
Author: pravega, Project: nautilus-samples, Lines: 24, Source: PravegaEventPublisher.java

Example 4: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("zookeeper.connect", "localhost:2181");
	properties.setProperty("group.id", "test");
	properties.setProperty("auto.offset.reset", "latest");
	FlinkKafkaConsumer08<DeviceEvent> flinkKafkaConsumer08 = new FlinkKafkaConsumer08<>("device-data",
			new DeviceSchema(), properties);

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<DeviceEvent> messageStream = env.addSource(flinkKafkaConsumer08);
	
	Map<String, String> config = new HashMap<>();
	config.put("cluster.name", "my-application");
	// This instructs the sink to emit after every element, otherwise they would be buffered
	config.put("bulk.flush.max.actions", "1");

	List<InetSocketAddress> transportAddresses = new ArrayList<>();
	transportAddresses.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));

	messageStream.addSink(new ElasticsearchSink<DeviceEvent>(config, transportAddresses, new ESSink()));
	env.execute();
}
 
Author: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 24, Source: FlinkESConnector.java

Example 5: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@SuppressWarnings("Convert2Lambda")
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> dataStream = streamExecutionEnvironment.readTextFile("file:///tmp/flink-esper-input");
    
    EsperStream<String> esperStream = Esper.pattern(dataStream, "select bytes from String");

    DataStream<String> result = esperStream.select(new EsperSelectFunction<String>() {
        private static final long serialVersionUID = 7093943872082195786L;

        @Override
        public String select(EventBean eventBean) throws Exception {
            return new String((byte[]) eventBean.get("bytes"));
        }
    });

    result.writeAsText("file:///tmp/flink-esper-output");

    streamExecutionEnvironment.execute("Simple Flink Esper Example");
}
 
Author: phil3k3, Project: flink-esper, Lines: 21, Source: FlinkTestClass.java

Example 6: testCustomizeSiddhiFunctionExtension

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
public void testCustomizeSiddhiFunctionExtension() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input = env.addSource(new RandomEventSource(5));

    SiddhiCEP cep = SiddhiCEP.getSiddhiEnvironment(env);
    cep.registerExtension("custom:plus", CustomPlusFunctionExtension.class);

    DataStream<Map<String, Object>> output = cep
        .from("inputStream", input, "id", "name", "price", "timestamp")
        .cql("from inputStream select timestamp, id, name, custom:plus(price,price) as doubled_price insert into  outputStream")
        .returnAsMap("outputStream");

    String resultPath = tempFolder.newFile().toURI().toString();
    output.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(5, getLineCount(resultPath));
}
 
Author: apache, Project: bahir-flink, Lines: 19, Source: SiddhiCEPITCase.java

Example 7: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.setParallelism(1);

	Properties kinesisConsumerConfig = new Properties();
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_REGION, pt.getRequired("region"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, pt.getRequired("accesskey"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, pt.getRequired("secretkey"));

	DataStream<String> kinesis = see.addSource(new FlinkKinesisConsumer<>(
		"flink-test",
		new SimpleStringSchema(),
		kinesisConsumerConfig));

	kinesis.print();

	see.execute();
}
 
Author: axbaretto, Project: flink, Lines: 21, Source: ConsumeFromKinesis.java

Example 8: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.setParallelism(1);

	DataStream<String> simpleStringStream = see.addSource(new EventsGenerator());

	Properties kinesisProducerConfig = new Properties();
	kinesisProducerConfig.setProperty(AWSConfigConstants.AWS_REGION, pt.getRequired("region"));
	kinesisProducerConfig.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, pt.getRequired("accessKey"));
	kinesisProducerConfig.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, pt.getRequired("secretKey"));

	FlinkKinesisProducer<String> kinesis = new FlinkKinesisProducer<>(
			new SimpleStringSchema(), kinesisProducerConfig);

	kinesis.setFailOnError(true);
	kinesis.setDefaultStream("flink-test");
	kinesis.setDefaultPartition("0");

	simpleStringStream.addSink(kinesis);

	see.execute();
}
 
Author: axbaretto, Project: flink, Lines: 25, Source: ProduceIntoKinesis.java

Example 9: testInheritOverride

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
public void testInheritOverride() {
	// verify that we can explicitly disable inheritance of the input slot sharing groups

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	FilterFunction<Long> dummyFilter = new FilterFunction<Long>() {
		@Override
		public boolean filter(Long value) {
			return false;
		}
	};

	DataStream<Long> src1 = env.generateSequence(1, 10).slotSharingGroup("group-1");
	DataStream<Long> src2 = env.generateSequence(1, 10).slotSharingGroup("group-1");

	// this should not inherit group but be in "default"
	src1.union(src2).filter(dummyFilter).slotSharingGroup("default");
	JobGraph jobGraph = env.getStreamGraph().getJobGraph();

	List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();

	assertEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(1).getSlotSharingGroup());
	assertNotEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(2).getSlotSharingGroup());
	assertNotEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(2).getSlotSharingGroup());
}
 
Author: axbaretto, Project: flink, Lines: 27, Source: SlotAllocationTest.java

Example 10: testFoldProcessingTime

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
@SuppressWarnings("rawtypes")
public void testFoldProcessingTime() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

	DataStream<Tuple3<String, String, Integer>> window = source
			.keyBy(new TupleKeySelector())
			.window(SlidingProcessingTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
			.fold(new Tuple3<>("", "", 0), new DummyFolder());

	OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
			(OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String,  Integer>>) window.getTransformation();
	OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator = transform.getOperator();
	Assert.assertTrue(operator instanceof WindowOperator);
	WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator = (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
	Assert.assertTrue(winOperator.getTrigger() instanceof ProcessingTimeTrigger);
	Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingProcessingTimeWindows);
	Assert.assertTrue(winOperator.getStateDescriptor() instanceof FoldingStateDescriptor);

	processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
 
Author: axbaretto, Project: flink, Lines: 25, Source: WindowTranslationTest.java

Example 11: dummyTest

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
	public void dummyTest() throws Exception {
		DateTime now = new DateTime();
		Collection<TaxiRide> taxiRides = new ArrayList<>();
		TaxiRide taxiRideNYC_1 = new TaxiRide(1, true, now, now, (float)GeoUtils.LON_EAST,
				(float)GeoUtils.LAT_NORTH, (float)GeoUtils.LON_WEST, (float)GeoUtils.LAT_SOUTH, (short)3);
		taxiRides.add(taxiRideNYC_1);

		TaxiRide taxiRideNYC_2 = new TaxiRide(2, true, now, now, (float)GeoUtils.LON_EAST,
				(float)GeoUtils.LAT_NORTH, (float)GeoUtils.LON_WEST, (float)GeoUtils.LAT_SOUTH, (short)3);
		taxiRides.add(taxiRideNYC_2);

		TaxiRide taxiRideNotInNYC_1 = new TaxiRide(2, true, now, now, (float)GeoUtils.LON_EAST + 1,
				(float)GeoUtils.LAT_NORTH, (float)GeoUtils.LON_WEST, (float)GeoUtils.LAT_SOUTH, (short)3);
		taxiRides.add(taxiRideNotInNYC_1);

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(1);
		DataStream<TaxiRide> rides = env.fromCollection(taxiRides);

		TaxiRideCleansing taxiRideCleansing = new TaxiRideCleansing();

		DataStream<TaxiRide> filteredRides = taxiRideCleansing.execute(rides);

		Collection<TaxiRide> RESULTS = new ArrayList<>();
		// Collect the filtered rides with a test sink so the results can be inspected:
		filteredRides.addSink(new ResultsSinkFunction(RESULTS));

		env.execute("Running Taxi Ride Cleansing");

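		// Flink serializes the sink function before running the job, so this local
		// RESULTS collection is never filled in on the driver; presumably that is
		// why the size assertion below is commented out.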
//		Assert.assertEquals(2, RESULTS.size());
		Assert.assertTrue(true);
	}
 
Author: dineshtrivedi, Project: flink-java-project, Lines: 34, Source: TaxiRideCleansingTest.java

Example 12: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("zookeeper.connect", "localhost:2181");
	properties.setProperty("group.id", "test");
	properties.setProperty("auto.offset.reset", "latest");
	FlinkKafkaConsumer08<String> flinkKafkaConsumer08 = new FlinkKafkaConsumer08<>("device-data",
			new SimpleStringSchema(), properties);

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<Tuple4<Long,Integer,Integer,Long>> messageStream = env.addSource(flinkKafkaConsumer08).map(new MapFunction<String, Tuple4<Long,Integer,Integer,Long>>() {

		private static final long serialVersionUID = 4723214570372887208L;

		@Override
		public Tuple4<Long,Integer,Integer,Long> map(String input) throws Exception {
			String[] inputSplits = input.split(",");
			return Tuple4.of(Long.parseLong(inputSplits[0]), Integer.parseInt(inputSplits[1]), Integer.parseInt(inputSplits[2]), Long.parseLong(inputSplits[3]));
		}
	});

	CassandraSink.addSink(messageStream).setQuery("INSERT INTO tdr.packet_tdr (phone_number, bin, bout, timestamp) values (?, ?, ? ,?);")
			.setClusterBuilder(new ClusterBuilder() {
				private static final long serialVersionUID = 1L;

				@Override
				public Cluster buildCluster(Cluster.Builder builder) {
					return builder.addContactPoint("127.0.0.1").build();
				}
			}).build();
	env.execute();
}
 
Author: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 33, Source: FlinkCassandraConnector.java

Example 13: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<TemperatureEvent> inputEventStream = env.fromElements(new TemperatureEvent("xyz", 22.0),
			new TemperatureEvent("xyz", 20.1), new TemperatureEvent("xyz", 21.1), new TemperatureEvent("xyz", 22.2),
			new TemperatureEvent("xyz", 29.1), new TemperatureEvent("xyz", 22.3), new TemperatureEvent("xyz", 22.1),
			new TemperatureEvent("xyz", 22.4), new TemperatureEvent("xyz", 22.7),
			new TemperatureEvent("xyz", 27.0));

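	// Match any single reading with a temperature of 26.0 or more, arriving within 10 seconds.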
	Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent> begin("first")
			.subtype(TemperatureEvent.class).where(new FilterFunction<TemperatureEvent>() {
				private static final long serialVersionUID = 1L;

				public boolean filter(TemperatureEvent value) {
					if (value.getTemperature() >= 26.0) {
						return true;
					}
					return false;
				}
			}).within(Time.seconds(10));

	DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
			.select(new PatternSelectFunction<TemperatureEvent, Alert>() {
				private static final long serialVersionUID = 1L;

				public Alert select(Map<String, TemperatureEvent> event) throws Exception {

					return new Alert("Temperature Rise Detected");
				}

			});

	patternStream.print();
	env.execute("CEP on Temperature Sensor");
}
 
Author: PacktPublishing, Project: Mastering-Apache-Flink, Lines: 35, Source: App.java

Example 14: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(2);
        env.enableCheckpointing(5000); // checkpoint every 5 seconds
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // retain externalized checkpoints on cancellation so the job can be restored from them later
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        env.setStateBackend(new FsStateBackend("file:///Users/zhouzhou/Binary/flink-1.3.2/testcheckpoints/"));
        RawLogGroupListDeserializer deserializer = new RawLogGroupListDeserializer();
        Properties configProps = new Properties();
        configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
        configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
        configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
        configProps.put(ConfigConstants.LOG_PROJECT, sProject);
        configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);
        configProps.put(ConfigConstants.LOG_MAX_NUMBER_PER_FETCH, "10");
        configProps.put(ConfigConstants.LOG_CONSUMER_BEGIN_POSITION, Consts.LOG_FROM_CHECKPOINT);
        configProps.put(ConfigConstants.LOG_CONSUMERGROUP, "23_ots_sla_etl_product");
        DataStream<RawLogGroupList> logTestStream = env.addSource(
                new FlinkLogConsumer<RawLogGroupList>(deserializer, configProps)
        );

        logTestStream.writeAsText("/Users/zhouzhou/Binary/flink-1.3.2/data/newb.txt." + System.nanoTime());
        env.execute("flink log connector");
    }
 
Author: aliyun, Project: aliyun-log-flink-connector, Lines: 29, Source: ConsumerSample.java

Example 15: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    // Read parameters from command line
    final ParameterTool params = ParameterTool.fromArgs(args);

    if(params.getNumberOfParameters() < 4) {
        System.out.println("\nUsage: FlinkReadKafka --read-topic <topic> --write-topic <topic> --bootstrap.servers <kafka brokers> --group.id <groupid>");
        return;
    }


    // setup streaming environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
    env.enableCheckpointing(300000); // 300 seconds
    env.getConfig().setGlobalJobParameters(params);

    DataStream<String> messageStream = env
            .addSource(new FlinkKafkaConsumer010<>(
                    params.getRequired("read-topic"),
                    new SimpleStringSchema(),
                    params.getProperties())).name("Read from Kafka");

    // set up the table environment (created here but not used further in this snippet)
    StreamTableEnvironment sTableEnv = TableEnvironment.getTableEnvironment(env);


    // Write JSON payload back to Kafka topic
    messageStream.addSink(new FlinkKafkaProducer010<>(
                params.getRequired("write-topic"),
                new SimpleStringSchema(),
                params.getProperties())).name("Write To Kafka");

    env.execute("FlinkReadWriteKafka");
}
 
Author: kgorman, Project: TrafficAnalyzer, Lines: 35, Source: FlinkReadWriteKafka.java


Note: The org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.getExecutionEnvironment method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Consult each project's license before distributing or reusing the code; do not reproduce without permission.