This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.execute. If you are wondering what StreamExecutionEnvironment.execute does, how to call it, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage of its enclosing class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.
The following presents 15 code examples of StreamExecutionEnvironment.execute, sorted by popularity by default.
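Before the examples, here is a minimal, self-contained sketch of the method's role (the pipeline and job name below are illustrative assumptions, not drawn from the examples that follow): transformations only declare the dataflow; execute() is the call that actually submits the job graph, blocks until the job finishes, and returns a JobExecutionResult.
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class MinimalExecuteExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Transformations only build the job graph; nothing runs yet.
        env.fromElements(1, 2, 3)
            .map(i -> i * 2)
            .print();
        // execute() submits the job and blocks until it finishes.
        JobExecutionResult result = env.execute("minimal-execute-example");
        System.out.println("Net runtime: " + result.getNetRuntime() + " ms");
    }
}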
Example 1: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String[] args) throws Exception {
    // set up the execution environment
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    Properties props = new Properties();
    props.setProperty(TwitterSource.CONSUMER_KEY, "...");
    props.setProperty(TwitterSource.CONSUMER_SECRET, "...");
    props.setProperty(TwitterSource.TOKEN, "...");
    props.setProperty(TwitterSource.TOKEN_SECRET, "...");

    // count hashtags over 30-second windows, then keep the top hashtag per window
    env.addSource(new TwitterSource(props))
        .flatMap(new ExtractHashTags())
        .keyBy(0)
        .timeWindow(Time.seconds(30))
        .sum(1)
        .filter(new FilterHashTags())
        .timeWindowAll(Time.seconds(30))
        .apply(new GetTopHashTag())
        .print();

    env.execute();
}
Example 2: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "test");

    DataStream<TemperatureEvent> inputEventStream = env.addSource(
            new FlinkKafkaConsumer09<TemperatureEvent>("test", new EventDeserializationSchema(), properties));

    // match any temperature reading of 26.0 or above within 10 seconds
    Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent>begin("first")
            .subtype(TemperatureEvent.class)
            .where(new FilterFunction<TemperatureEvent>() {
                private static final long serialVersionUID = 1L;

                @Override
                public boolean filter(TemperatureEvent value) {
                    return value.getTemperature() >= 26.0;
                }
            })
            .within(Time.seconds(10));

    DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
            .select(new PatternSelectFunction<TemperatureEvent, Alert>() {
                private static final long serialVersionUID = 1L;

                @Override
                public Alert select(Map<String, TemperatureEvent> event) throws Exception {
                    return new Alert("Temperature Rise Detected:" + event.get("first").getTemperature()
                            + " on machine name:" + event.get("first").getMachineName());
                }
            });

    patternStream.print();
    env.execute("CEP on Temperature Sensor");
}
Example 3: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);

    // JNDI environment for the Solace JMS connection
    final Hashtable<String, String> jmsEnv = new Hashtable<>();
    jmsEnv.put(InitialContext.INITIAL_CONTEXT_FACTORY, "com.solacesystems.jndi.SolJNDIInitialContextFactory");
    jmsEnv.put(InitialContext.PROVIDER_URL, "smf://192.168.56.101");
    jmsEnv.put(Context.SECURITY_PRINCIPAL, "[email protected]_vpn");
    jmsEnv.put(Context.SECURITY_CREDENTIALS, "password");

    env.addSource(new JMSTopicSource<String>(jmsEnv,
            "flink_cf",
            "flink/topic",
            new JMSTextTranslator()))
        .print();

    env.execute();
}
Example 4: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String... args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<WikipediaEditEvent> edits = env.addSource(new WikipediaEditsSource());

    // per-minute edit count and total bytes changed across all edits
    edits
        .timeWindowAll(Time.minutes(1))
        .apply(new AllWindowFunction<WikipediaEditEvent, Tuple3<Date, Long, Long>, TimeWindow>() {
            @Override
            public void apply(TimeWindow timeWindow, Iterable<WikipediaEditEvent> iterable, Collector<Tuple3<Date, Long, Long>> collector) throws Exception {
                long count = 0;
                long bytesChanged = 0;
                for (WikipediaEditEvent event : iterable) {
                    count++;
                    bytesChanged += event.getByteDiff();
                }
                collector.collect(new Tuple3<>(new Date(timeWindow.getEnd()), count, bytesChanged));
            }
        })
        .print();

    env.execute();
}
Example 5: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
@SuppressWarnings("Convert2Lambda")
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> dataStream = streamExecutionEnvironment.readTextFile("file:///tmp/flink-esper-input");

    EsperStream<String> esperStream = Esper.pattern(dataStream, "select bytes from String");

    DataStream<String> result = esperStream.select(new EsperSelectFunction<String>() {
        private static final long serialVersionUID = 7093943872082195786L;

        @Override
        public String select(EventBean eventBean) throws Exception {
            return new String((byte[]) eventBean.get("bytes"));
        }
    });

    result.writeAsText("file:///tmp/flink-esper-output");
    streamExecutionEnvironment.execute("Simple Flink Esper Example");
}
Example 6: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String[] args) throws Exception {
    // set up the execution environment
    final StreamExecutionEnvironment env = new StreamExecutionEnvBuilder().build();

    // Get the json config for parsing the raw input stream
    String parsingConfig = AppUtils.getParsingJsonConfig();

    KeyedStream<Tuple3<String, Long, String>, Tuple> kaydRawMessagesStream =
            setupKayedRawMessagesStream(env, parsingConfig);

    String outputStreamTopicName = configs.getStringProp("inputStreamTopicName");
    double streamDelayScale = configs.getDoubleProp("streamDelayScale");
    Properties producerProps = AppUtils.getKafkaProducerProperties();

    // replay the stream
    kaydRawMessagesStream.map(new StreamPlayer(streamDelayScale, outputStreamTopicName,
            producerProps)).setParallelism(1);

    // execute program
    env.execute("datAcron In-Situ Processing AIS Message Stream Simulator"
            + AppUtils.getAppVersion());
}
Example 7: tryExecute
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static JobExecutionResult tryExecute(StreamExecutionEnvironment see, String name) throws Exception {
    try {
        return see.execute(name);
    }
    catch (ProgramInvocationException | JobExecutionException root) {
        Throwable cause = root.getCause();

        // search for nested SuccessExceptions
        int depth = 0;
        while (!(cause instanceof SuccessException)) {
            if (cause == null || depth++ == 20) {
                root.printStackTrace();
                fail("Test failed: " + root.getMessage());
            }
            else {
                cause = cause.getCause();
            }
        }
    }
    return null;
}
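The helper above lets a streaming test end an otherwise unbounded job by throwing a SuccessException somewhere in the pipeline and treating it as a pass. A minimal usage sketch follows; the sink and element values are illustrative assumptions, not taken from the examples here, and SuccessException is assumed to be the one from Flink's test utilities:
// Illustrative only: a sink that signals success once the expected element arrives.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromElements(1, 2, 3)
    .addSink(new SinkFunction<Integer>() {
        @Override
        public void invoke(Integer value) throws Exception {
            if (value == 3) {
                // Ends the job; tryExecute unwraps this and treats it as success.
                throw new SuccessException();
            }
        }
    });
tryExecute(env, "success-exception-demo");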
Example 8: testTimestampHandling
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
/**
 * Checks whether timestamps are properly assigned at the sources and handled in
 * network transmission and between chained operators when timestamps are enabled.
 */
@Test
public void testTimestampHandling() throws Exception {
    final int numElements = 10;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    env.setParallelism(PARALLELISM);
    env.getConfig().disableSysoutLogging();

    DataStream<Integer> source1 = env.addSource(new MyTimestampSource(0L, numElements));
    DataStream<Integer> source2 = env.addSource(new MyTimestampSource(0L, numElements));

    source1
        .map(new IdentityMap())
        .connect(source2).map(new IdentityCoMap())
        .transform("Custom Operator", BasicTypeInfo.INT_TYPE_INFO, new TimestampCheckingOperator())
        .addSink(new DiscardingSink<Integer>());

    env.execute();
}
Example 9: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // pull data from the NiFi output port named "Data for Flink"
    SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data for Flink")
            .requestBatchCount(5)
            .buildConfig();

    SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
    DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource).setParallelism(2);

    DataStream<String> dataStream = streamSource.map(new MapFunction<NiFiDataPacket, String>() {
        @Override
        public String map(NiFiDataPacket value) throws Exception {
            return new String(value.getContent(), Charset.defaultCharset());
        }
    });

    dataStream.print();
    env.execute();
}
Example 10: publishUsingFlinkConnector
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
private void publishUsingFlinkConnector(AppConfiguration appConfiguration) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamId streamId = getStreamId();
    FlinkPravegaWriter<Event> writer = pravega.newWriter(streamId, Event.class, new EventRouter());
    int parallelism = appConfiguration.getPipeline().getParallelism();

    if (appConfiguration.getProducer().isControlledEnv()) {
        if (!(env instanceof LocalStreamEnvironment)) {
            throw new Exception("Use a local Flink environment or set controlledEnv to false in app.json.");
        }
        // setting this to a single instance, since the controlled run allows user input to trigger error events
        env.setParallelism(1);
        long latency = appConfiguration.getProducer().getLatencyInMilliSec();
        int capacity = appConfiguration.getProducer().getCapacity();
        ControlledSourceContextProducer controlledSourceContextProducer =
                new ControlledSourceContextProducer(capacity, latency);
        env.addSource(controlledSourceContextProducer).name("EventSource")
                .addSink(writer).name("Pravega-" + streamId.getName());
    } else {
        env.setParallelism(parallelism);
        SourceContextProducer sourceContextProducer = new SourceContextProducer(appConfiguration);
        env.addSource(sourceContextProducer).name("EventSource")
                .addSink(writer).name("Pravega-" + streamId.getName());
    }

    env.execute(appConfiguration.getName() + "-producer");
}
Example 11: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String[] args) throws Exception {
    final ParameterTool params = ParameterTool.fromArgs(args);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setGlobalJobParameters(params);
    env.setParallelism(2);

    // checkpoint every 5 seconds, exactly-once, retaining checkpoints on cancellation
    env.enableCheckpointing(5000);
    env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
    env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    env.setStateBackend(new FsStateBackend("file:///Users/zhouzhou/Binary/flink-1.3.2/testcheckpoints/"));

    RawLogGroupListDeserializer deserializer = new RawLogGroupListDeserializer();
    Properties configProps = new Properties();
    configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
    configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
    configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
    configProps.put(ConfigConstants.LOG_PROJECT, sProject);
    configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);
    configProps.put(ConfigConstants.LOG_MAX_NUMBER_PER_FETCH, "10");
    configProps.put(ConfigConstants.LOG_CONSUMER_BEGIN_POSITION, Consts.LOG_FROM_CHECKPOINT);
    configProps.put(ConfigConstants.LOG_CONSUMERGROUP, "23_ots_sla_etl_product");

    DataStream<RawLogGroupList> logTestStream = env.addSource(
            new FlinkLogConsumer<RawLogGroupList>(deserializer, configProps)
    );

    logTestStream.writeAsText("/Users/zhouzhou/Binary/flink-1.3.2/data/newb.txt." + System.nanoTime());
    env.execute("flink log connector");
}
Example 12: exactlyOnceWriteSimulator
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public void exactlyOnceWriteSimulator(final StreamId outStreamId, final StreamUtils streamUtils, int numElements) throws Exception {
    final int checkpointInterval = 100;
    final int restartAttempts = 1;
    final long delayBetweenAttempts = 0L;
    // 30 sec timeout for all
    final long txTimeout = 30 * 1000;
    final long txTimeoutMax = 30 * 1000;
    final long txTimeoutGracePeriod = 30 * 1000;
    final String jobName = "ExactlyOnceSimulator";

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.enableCheckpointing(checkpointInterval);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(restartAttempts, delayBetweenAttempts));

    // Pravega Writer
    FlinkPravegaWriter<Integer> pravegaExactlyOnceWriter = streamUtils.newExactlyOnceWriter(outStreamId,
            Integer.class, new IdentityRouter<>());

    // fail the identity mapper halfway through to exercise exactly-once recovery
    env
        .addSource(new IntegerCounterSourceGenerator(numElements))
        .map(new FailingIdentityMapper<>(numElements / parallelism / 2))
        .rebalance()
        .addSink(pravegaExactlyOnceWriter);

    env.execute(jobName);
}
Example 13: standardReadWriteSimulator
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public void standardReadWriteSimulator(final StreamId inStreamId, final StreamId outStreamId, final StreamUtils streamUtils, int numElements) throws Exception {
    final int checkpointInterval = 100;
    final int taskFailureRestartAttempts = 1;
    final long delayBetweenRestartAttempts = 0L;
    final long startTime = 0L;
    final String jobName = "standardReadWriteSimulator";

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.enableCheckpointing(checkpointInterval);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(taskFailureRestartAttempts, delayBetweenRestartAttempts));

    // the Pravega reader
    final FlinkPravegaReader<Integer> pravegaSource = streamUtils.getFlinkPravegaParams().newReader(inStreamId, startTime, Integer.class);

    // the Pravega writer
    FlinkPravegaWriter<Integer> pravegaWriter = streamUtils.getFlinkPravegaParams().newWriter(outStreamId, Integer.class, new IdentityRouter<>());
    pravegaWriter.setPravegaWriterMode(PravegaWriterMode.ATLEAST_ONCE);

    DataStream<Integer> stream = env.addSource(pravegaSource).map(new IdentityMapper<>());
    stream.addSink(pravegaWriter);
    stream.addSink(new IntSequenceExactlyOnceValidator(numElements));

    env.execute(jobName);
}
Example 14: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
public static void main(String... args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<WikipediaEditEvent> edits = env.addSource(new WikipediaEditsSource());

    // keep only large, human-made edits
    edits.filter((FilterFunction<WikipediaEditEvent>) edit ->
            !edit.isBotEdit() && edit.getByteDiff() > 1000)
        .print();

    env.execute();
}
Example 15: shouldSelectFromStreamUsingLambdaSelect
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class required for the method
@Test
@SuppressWarnings("Convert2Lambda")
public void shouldSelectFromStreamUsingLambdaSelect() throws Exception {
    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    DataStream<TestEvent> dataStream = executionEnvironment.fromElements(
            new TestEvent("peter1", 10), new TestEvent("alex1", 25), new TestEvent("maria1", 30));

    EsperStream<TestEvent> esperStream = Esper.query(dataStream, "select name, age from TestEvent");

    DataStream<TestEvent> resultStream = esperStream.select((EsperSelectFunction<TestEvent>) collector -> {
        String name = (String) collector.get("name");
        int age = (int) collector.get("age");
        return new TestEvent(name, age);
    });

    resultStream.addSink(new SinkFunction<TestEvent>() {
        private static final long serialVersionUID = 5588530728493738002L;

        @Override
        public void invoke(TestEvent testEvent) throws Exception {
            result.add(testEvent);
        }
    });

    executionEnvironment.execute("test-1");

    assertThat(result, is(notNullValue()));
    assertThat(result.size(), is(3));
}