

Java StreamExecutionEnvironment.setRestartStrategy Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setRestartStrategy. If you are unsure what StreamExecutionEnvironment.setRestartStrategy does or how to use it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.


Below are 14 code examples of the StreamExecutionEnvironment.setRestartStrategy method, sorted by popularity by default.
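
Before diving into the examples, here is a minimal, self-contained sketch of the three built-in strategies exposed by RestartStrategies. This is my own illustration rather than code from the projects below, and the class name RestartStrategyOverview is made up. Note that in the Flink 1.x versions these examples target, enabling checkpointing without configuring a restart strategy implicitly selects a fixed-delay strategy with Integer.MAX_VALUE attempts, which is why several tests below call noRestart() explicitly.

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RestartStrategyOverview {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Retry a failed job up to 3 times, waiting 10 seconds between attempts.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(10)));

		// Alternative: fail the job permanently on the first error.
		// env.setRestartStrategy(RestartStrategies.noRestart());

		// Alternative: tolerate at most 3 failures within any 5-minute window,
		// waiting 10 seconds between restart attempts.
		// env.setRestartStrategy(RestartStrategies.failureRateRestart(
		// 		3, Time.minutes(5), Time.seconds(10)));
	}
}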

Example 1: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.enableCheckpointing(1000);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
	env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

	CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
		.setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
		.enableWriteAheadLog()
		.setClusterBuilder(new ClusterBuilder() {

			private static final long serialVersionUID = 2793938419775311824L;

			@Override
			public Cluster buildCluster(Cluster.Builder builder) {
				return builder.addContactPoint("127.0.0.1").build();
			}
		})
		.build();

	sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

	env.execute();
}
 
Developer: axbaretto | Project: flink | Lines: 26 | Source: CassandraTupleWriteAheadSinkExample.java

Example 2: exactlyOnceWriteSimulator

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public void exactlyOnceWriteSimulator(final StreamId outStreamId, final StreamUtils streamUtils, int numElements) throws Exception {

	final int checkpointInterval = 100;

	final int restartAttempts = 1;
	final long delayBetweenAttempts = 0L;

	// 30 sec transaction timeouts for everything (kept from the original class; not used in this excerpt)
	final long txTimeout = 30 * 1000;
	final long txTimeoutMax = 30 * 1000;
	final long txTimeoutGracePeriod = 30 * 1000;

	final String jobName = "ExactlyOnceSimulator";

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);

	env.enableCheckpointing(checkpointInterval);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(restartAttempts, delayBetweenAttempts));

	// Pravega Writer
	FlinkPravegaWriter<Integer> pravegaExactlyOnceWriter = streamUtils.newExactlyOnceWriter(outStreamId,
			Integer.class, new IdentityRouter<>());

	env
			.addSource(new IntegerCounterSourceGenerator(numElements))
			.map(new FailingIdentityMapper<>(numElements / parallelism / 2))
			.rebalance()
			.addSink(pravegaExactlyOnceWriter);

	env.execute(jobName);
}
 
Developer: pravega | Project: nautilus-samples | Lines: 33 | Source: EventCounterApp.java

Example 3: standardReadWriteSimulator

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public void standardReadWriteSimulator(final StreamId inStreamId, final StreamId outStreamId, final StreamUtils streamUtils, int numElements) throws Exception {

	final int checkpointInterval = 100;
	final int taskFailureRestartAttempts = 1;
	final long delayBetweenRestartAttempts = 0L;
	final long startTime = 0L;
	final String jobName = "standardReadWriteSimulator";

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.enableCheckpointing(checkpointInterval);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(taskFailureRestartAttempts, delayBetweenRestartAttempts));

	// the Pravega reader
	final FlinkPravegaReader<Integer> pravegaSource = streamUtils.getFlinkPravegaParams().newReader(inStreamId, startTime, Integer.class);

	// Pravega Writer
	FlinkPravegaWriter<Integer> pravegaWriter = streamUtils.getFlinkPravegaParams().newWriter(outStreamId, Integer.class, new IdentityRouter<>());
	pravegaWriter.setPravegaWriterMode(PravegaWriterMode.ATLEAST_ONCE);

	DataStream<Integer> stream = env.addSource(pravegaSource).map(new IdentityMapper<>());

	stream.addSink(pravegaWriter);

	stream.addSink(new IntSequenceExactlyOnceValidator(numElements));

	env.execute(jobName);
}
 
Developer: pravega | Project: nautilus-samples | Lines: 30 | Source: EventCounterApp.java

Example 4: createJobGraphWithKeyedAndNonPartitionedOperatorState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private static JobGraph createJobGraphWithKeyedAndNonPartitionedOperatorState(
		int parallelism,
		int maxParallelism,
		int fixedParallelism,
		int numberKeys,
		int numberElements,
		boolean terminateAfterEmission,
		int checkpointingInterval) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.getConfig().setMaxParallelism(maxParallelism);
	env.enableCheckpointing(checkpointingInterval);
	env.setRestartStrategy(RestartStrategies.noRestart());

	DataStream<Integer> input = env.addSource(new SubtaskIndexNonPartitionedStateSource(
			numberKeys,
			numberElements,
			terminateAfterEmission))
			.setParallelism(fixedParallelism)
			.keyBy(new KeySelector<Integer, Integer>() {
				private static final long serialVersionUID = -7952298871120320940L;

				@Override
				public Integer getKey(Integer value) throws Exception {
					return value;
				}
			});

	SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);

	DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));

	result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());

	return env.getStreamGraph().getJobGraph();
}
 
Developer: axbaretto | Project: flink | Lines: 38 | Source: RescalingITCase.java

Example 5: testValueState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests a simple queryable value state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds once each subtask index has been queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {

	final Deadline deadline = TEST_TIMEOUT.fromNow();
	final long numElements = 1024L;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStateBackend(stateBackend);
	env.setParallelism(maxParallelism);
	// Very important, because cluster is shared between tests and we
	// don't explicitly check that all slots are available before
	// submitting.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

	DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

	// Value state
	ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>("any", source.getType());

	source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
		private static final long serialVersionUID = 7662520075515707428L;

		@Override
		public Integer getKey(Tuple2<Integer, Long> value) {
			return value.f0;
		}
	}).asQueryableState("hakuna", valueState);

	try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(cluster, env, deadline)) {

		final JobID jobId = autoCancellableJob.getJobId();
		final JobGraph jobGraph = autoCancellableJob.getJobGraph();

		cluster.submitJobDetached(jobGraph);

		executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
	}
}
 
Developer: axbaretto | Project: flink | Lines: 45 | Source: AbstractQueryableStateTestBase.java

Example 6: createJobGraphWithKeyedState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private static JobGraph createJobGraphWithKeyedState(
		int parallelism,
		int maxParallelism,
		int numberKeys,
		int numberElements,
		boolean terminateAfterEmission,
		int checkpointingInterval) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	if (0 < maxParallelism) {
		env.getConfig().setMaxParallelism(maxParallelism);
	}
	env.enableCheckpointing(checkpointingInterval);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().setUseSnapshotCompression(true);

	DataStream<Integer> input = env.addSource(new SubtaskIndexSource(
			numberKeys,
			numberElements,
			terminateAfterEmission))
			.keyBy(new KeySelector<Integer, Integer>() {
				private static final long serialVersionUID = -7952298871120320940L;

				@Override
				public Integer getKey(Integer value) throws Exception {
					return value;
				}
			});

	SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);

	DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));

	result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());

	return env.getStreamGraph().getJobGraph();
}
 
Developer: axbaretto | Project: flink | Lines: 39 | Source: RescalingITCase.java

Example 7: runKeyValueTest

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public void runKeyValueTest() throws Exception {
	final String topic = "keyvaluetest";
	createTestTopic(topic, 1, 1);
	final int elementCount = 5000;

	// ----------- Write some data into Kafka -------------------

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().disableSysoutLogging();

	DataStream<Tuple2<Long, PojoValue>> kvStream = env.addSource(new SourceFunction<Tuple2<Long, PojoValue>>() {
		@Override
		public void run(SourceContext<Tuple2<Long, PojoValue>> ctx) throws Exception {
			Random rnd = new Random(1337);
			for (long i = 0; i < elementCount; i++) {
				PojoValue pojo = new PojoValue();
				pojo.when = new Date(rnd.nextLong());
				pojo.lon = rnd.nextLong();
				pojo.lat = i;
				// make every second key null to ensure proper "null" serialization
				Long key = (i % 2 == 0) ? null : i;
				ctx.collect(new Tuple2<>(key, pojo));
			}
		}

		@Override
		public void cancel() {
		}
	});

	KeyedSerializationSchema<Tuple2<Long, PojoValue>> schema = new TypeInformationKeyValueSerializationSchema<>(Long.class, PojoValue.class, env.getConfig());
	Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
	producerProperties.setProperty("retries", "3");
	kafkaServer.produceIntoKafka(kvStream, topic, schema, producerProperties, null);
	env.execute("Write KV to Kafka");

	// ----------- Read the data again -------------------

	env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().disableSysoutLogging();

	KeyedDeserializationSchema<Tuple2<Long, PojoValue>> readSchema = new TypeInformationKeyValueSerializationSchema<>(Long.class, PojoValue.class, env.getConfig());

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	DataStream<Tuple2<Long, PojoValue>> fromKafka = env.addSource(kafkaServer.getConsumer(topic, readSchema, props));
	fromKafka.flatMap(new RichFlatMapFunction<Tuple2<Long, PojoValue>, Object>() {
		long counter = 0;
		@Override
		public void flatMap(Tuple2<Long, PojoValue> value, Collector<Object> out) throws Exception {
			// the elements should be in order.
			Assert.assertTrue("Wrong value " + value.f1.lat, value.f1.lat == counter);
			if (value.f1.lat % 2 == 0) {
				assertNull("key was not null", value.f0);
			} else {
				Assert.assertTrue("Wrong value " + value.f0, value.f0 == counter);
			}
			counter++;
			if (counter == elementCount) {
				// we got the right number of elements
				throw new SuccessException();
			}
		}
	});

	tryExecute(env, "Read KV from Kafka");

	deleteTestTopic(topic);
}
 
Developer: axbaretto | Project: flink | Lines: 75 | Source: KafkaConsumerTestBase.java

Example 8: testAtLeastOnceWriter

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests the {@link FlinkPravegaWriter} in {@code AT_LEAST_ONCE} mode.
 */
@Test
public void testAtLeastOnceWriter() throws Exception {
    // set up the stream
    final String streamName = RandomStringUtils.randomAlphabetic(20);
    SETUP_UTILS.createTestStream(streamName, 1);

    StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.createLocalEnvironment()
            .setParallelism(1)
            .enableCheckpointing(1000, CheckpointingMode.EXACTLY_ONCE);
    execEnv.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));

    DataStreamSource<Integer> dataStream = execEnv
            .addSource(new IntegerGeneratingSource(true, EVENT_COUNT_PER_SOURCE));

    FlinkPravegaWriter<Integer> pravegaSink = new FlinkPravegaWriter<>(
            SETUP_UTILS.getControllerUri(),
            SETUP_UTILS.getScope(),
            streamName,
            new IntSerializer(),
            event -> "fixedkey");
    pravegaSink.setPravegaWriterMode(PravegaWriterMode.ATLEAST_ONCE);
    dataStream.addSink(pravegaSink).setParallelism(2);

    execEnv.execute();
    List<Integer> readElements = readAllEvents(streamName);

    // Now verify that all expected events are present in the stream. Extra elements are
    // fine, since we are testing the at-least-once writer.
    Collections.sort(readElements);
    int expectedEventValue = 0;
    for (int i = 0; i < readElements.size();) {
        if (readElements.get(i) != expectedEventValue) {
            throw new IllegalStateException("Element: " + expectedEventValue + " missing in the stream");
        }

        while (i < readElements.size() && readElements.get(i) == expectedEventValue) {
            i++;
        }
        expectedEventValue++;
    }
    Assert.assertEquals(EVENT_COUNT_PER_SOURCE, expectedEventValue);
}
 
Developer: pravega | Project: flink-connectors | Lines: 46 | Source: FlinkPravegaWriterITCase.java

Example 9: getFlinkJob

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private JobGraph getFlinkJob(
        final int sourceParallelism,
        final String streamName,
        final int numElements) throws IOException {

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(sourceParallelism);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 0L));

    // to make the test faster, we use a combination of fast triggering of checkpoints,
    // but some pauses after completed checkpoints
    env.getCheckpointConfig().setCheckpointInterval(100);
    env.getCheckpointConfig().setMinPauseBetweenCheckpoints(2000);

    // we currently need this to work around the case where tasks are
    // started too late, a checkpoint was already triggered, and some tasks
    // never see the checkpoint event
    env.getCheckpointConfig().setCheckpointTimeout(5000);

    // checkpoint to files (but aggregate state below 1 MB) and don't do any async checkpoints
    env.setStateBackend(new FsStateBackend(tmpFolder.newFolder().toURI(), 1024 * 1024, false));

    // the Pravega reader
    final FlinkPravegaReader<Integer> pravegaSource = new FlinkPravegaReader<>(
            SETUP_UTILS.getControllerUri(),
            SETUP_UTILS.getScope(),
            Collections.singleton(streamName),
            0,
            new IntDeserializer(),
            "my_reader_name");

    env
            .addSource(pravegaSource)

            // hook in the notifying mapper
            .map(new NotifyingMapper<>())
            .setParallelism(1)

            // the sink validates that the exactly-once semantics hold
            // it must be non-parallel so that it sees all elements and can trivially
            // check for duplicates
            .addSink(new IntSequenceExactlyOnceValidator(numElements))
            .setParallelism(1);
    
    return env.getStreamGraph().getJobGraph();
}
 
Developer: pravega | Project: flink-connectors | Lines: 47 | Source: FlinkPravegaReaderSavepointTest.java

Example 10: exactlyOnceReadWriteSimulator

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public void exactlyOnceReadWriteSimulator(final StreamId inStreamId, final StreamId outStreamId,
										  final StreamUtils streamUtils, int numElements,
										  boolean generateData, boolean throttled) throws Exception {

	final int blockAtNum = numElements/2;
	final int sleepPerElement = 1;

	final int checkpointInterval = 100;
	final int taskFailureRestartAttempts = 3;
	final long delayBetweenRestartAttempts = 0L;
	final long startTime = 0L;
	final String jobName = "exactlyOnceReadWriteSimulator";

	//30 sec timeout for all
	final long txTimeout = 30 * 1000;
	final long txTimeoutMax = 30 * 1000;
	final long txTimeoutGracePeriod = 30 * 1000;

	EventStreamWriter<Integer> eventWriter;
	ThrottledIntegerWriter producer = null;

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.enableCheckpointing(checkpointInterval);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(taskFailureRestartAttempts, delayBetweenRestartAttempts));

	// we currently need this to work around the case where tasks are started too late, a checkpoint was already triggered, and some tasks
	// never see the checkpoint event
	env.getCheckpointConfig().setCheckpointTimeout(2000);

	// the Pravega reader
	final FlinkPravegaReader<Integer> pravegaSource = streamUtils.getFlinkPravegaParams().newReader(inStreamId, startTime, Integer.class);

	// Pravega Writer
	FlinkPravegaWriter<Integer> pravegaExactlyOnceWriter = streamUtils.newExactlyOnceWriter(outStreamId,
			Integer.class, new IdentityRouter<>());

	DataStream<Integer> stream = env
			.addSource(pravegaSource)
			.map(new FailingIdentityMapper<>(numElements * 2 / 3))
			.setParallelism(1)
			.map(new NotifyingMapper<>())
			.setParallelism(1);

	stream.addSink(pravegaExactlyOnceWriter)
			.setParallelism(1);

	stream.addSink(new IntSequenceExactlyOnceValidator(numElements))
			.setParallelism(1);

	if (generateData) {
		eventWriter = streamUtils.createWriter(inStreamId.getName(), inStreamId.getScope());
		producer = new ThrottledIntegerWriter(eventWriter, numElements, blockAtNum, sleepPerElement, false);
		producer.start();
		if (throttled) {
			ThrottledIntegerWriter finalProducer = producer;
			TO_CALL_ON_COMPLETION.set(() -> finalProducer.unThrottle());
		}
	}

	try {
		env.execute(jobName);
	} catch (Exception e) {
		if (!(ExceptionUtils.getRootCause(e) instanceof IntSequenceExactlyOnceValidator.SuccessException)) {
			throw e;
		}
	}

	if (generateData && producer != null) producer.sync();

}
 
Developer: pravega | Project: nautilus-samples | Lines: 73 | Source: EventCounterApp.java

Example 11: testQueryNonStartedJobState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * Similar test to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final int numElements = 1024;

	final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(NUM_SLOTS);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

		DataStream<Tuple2<Integer, Long>> source = env
			.addSource(new TestAscendingValueSource(numElements));

		// Value state
		ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
			"any",
			source.getType(),
			null);

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
			source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
				@Override
				public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
					return value.f0;
				}
			}).asQueryableState("hakuna", valueState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		// Now query
		long expected = numElements;

		// query once
		client.getKvState(jobId, queryableState.getQueryableStateName(), 0,
			KvStateRequestSerializer.serializeKeyAndNamespace(
				0,
				queryableState.getKeySerializer(),
				VoidNamespace.INSTANCE,
				VoidNamespaceSerializer.INSTANCE));

		cluster.submitJobDetached(jobGraph);

		executeValueQuery(deadline, client, jobId, queryableState,
			expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			Future<CancellationSuccess> cancellation = cluster
				.getLeaderGateway(deadline.timeLeft())
				.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
				.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

			Await.ready(cancellation, deadline.timeLeft());
		}

		client.shutDown();
	}
}
 
Developer: axbaretto | Project: flink | Lines: 73 | Source: QueryableStateITCase.java

Example 12: runKeyValueTest

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public void runKeyValueTest() throws Exception {
	final String topic = "keyvaluetest";
	createTestTopic(topic, 1, 1);
	final int ELEMENT_COUNT = 5000;

	// ----------- Write some data into Kafka -------------------

	StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
	env.setParallelism(1);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().disableSysoutLogging();

	DataStream<Tuple2<Long, PojoValue>> kvStream = env.addSource(new SourceFunction<Tuple2<Long, PojoValue>>() {
		@Override
		public void run(SourceContext<Tuple2<Long, PojoValue>> ctx) throws Exception {
			Random rnd = new Random(1337);
			for (long i = 0; i < ELEMENT_COUNT; i++) {
				PojoValue pojo = new PojoValue();
				pojo.when = new Date(rnd.nextLong());
				pojo.lon = rnd.nextLong();
				pojo.lat = i;
				// make every second key null to ensure proper "null" serialization
				Long key = (i % 2 == 0) ? null : i;
				ctx.collect(new Tuple2<>(key, pojo));
			}
		}
		@Override
		public void cancel() {
		}
	});

	KeyedSerializationSchema<Tuple2<Long, PojoValue>> schema = new TypeInformationKeyValueSerializationSchema<>(Long.class, PojoValue.class, env.getConfig());
	Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
	producerProperties.setProperty("retries", "3");
	kafkaServer.produceIntoKafka(kvStream, topic, schema, producerProperties, null);
	env.execute("Write KV to Kafka");

	// ----------- Read the data again -------------------

	env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
	env.setParallelism(1);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().disableSysoutLogging();

	KeyedDeserializationSchema<Tuple2<Long, PojoValue>> readSchema = new TypeInformationKeyValueSerializationSchema<>(Long.class, PojoValue.class, env.getConfig());

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	DataStream<Tuple2<Long, PojoValue>> fromKafka = env.addSource(kafkaServer.getConsumer(topic, readSchema, props));
	fromKafka.flatMap(new RichFlatMapFunction<Tuple2<Long, PojoValue>, Object>() {
		long counter = 0;
		@Override
		public void flatMap(Tuple2<Long, PojoValue> value, Collector<Object> out) throws Exception {
			// the elements should be in order.
			Assert.assertTrue("Wrong value " + value.f1.lat, value.f1.lat == counter);
			if (value.f1.lat % 2 == 0) {
				assertNull("key was not null", value.f0);
			} else {
				Assert.assertTrue("Wrong value " + value.f0, value.f0 == counter);
			}
			counter++;
			if (counter == ELEMENT_COUNT) {
				// we got the right number of elements
				throw new SuccessException();
			}
		}
	});

	tryExecute(env, "Read KV from Kafka");

	deleteTestTopic(topic);
}
 
Developer: axbaretto | Project: flink | Lines: 75 | Source: KafkaConsumerTestBase.java

Example 13: testSlidingTimeWindow

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
public void testSlidingTimeWindow() {
	final int NUM_ELEMENTS_PER_KEY = 3000;
	final int WINDOW_SIZE = 1000;
	final int WINDOW_SLIDE = 100;
	final int NUM_KEYS = 100;
	FailingSource.reset();

	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
				"localhost", cluster.getLeaderRPCPort());

		env.setMaxParallelism(2 * PARALLELISM);
		env.setParallelism(PARALLELISM);
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
		env.enableCheckpointing(100);
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 0));
		env.getConfig().disableSysoutLogging();
		env.setStateBackend(this.stateBackend);

		env
				.addSource(new FailingSource(NUM_KEYS, NUM_ELEMENTS_PER_KEY, NUM_ELEMENTS_PER_KEY / 3))
				.rebalance()
				.keyBy(0)
				.timeWindow(Time.of(WINDOW_SIZE, MILLISECONDS), Time.of(WINDOW_SLIDE, MILLISECONDS))
				.apply(new RichWindowFunction<Tuple2<Long, IntType>, Tuple4<Long, Long, Long, IntType>, Tuple, TimeWindow>() {

					private boolean open = false;

					@Override
					public void open(Configuration parameters) {
						assertEquals(PARALLELISM, getRuntimeContext().getNumberOfParallelSubtasks());
						open = true;
					}

					@Override
					public void apply(
							Tuple tuple,
							TimeWindow window,
							Iterable<Tuple2<Long, IntType>> values,
							Collector<Tuple4<Long, Long, Long, IntType>> out) {

						// validate that the function has been opened properly
						assertTrue(open);

						int sum = 0;
						long key = -1;

						for (Tuple2<Long, IntType> value : values) {
							sum += value.f1.value;
							key = value.f0;
						}
						out.collect(new Tuple4<>(key, window.getStart(), window.getEnd(), new IntType(sum)));
					}
				})
				.addSink(new ValidatingSink(NUM_KEYS, NUM_ELEMENTS_PER_KEY / WINDOW_SLIDE)).setParallelism(1);

		tryExecute(env, "Sliding Window Test");
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Developer: axbaretto | Project: flink | Lines: 66 | Source: EventTimeWindowCheckpointingITCase.java

Example 14: testQueryNonStartedJobState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * Similar test to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final long numElements = 1024L;

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStateBackend(stateBackend);
		env.setParallelism(maxParallelism);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

		DataStream<Tuple2<Integer, Long>> source = env
			.addSource(new TestAscendingValueSource(numElements));

		// Value state
		ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
			"any",
			source.getType(),
			null);

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
			source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
				private static final long serialVersionUID = 7480503339992214681L;

				@Override
				public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
					return value.f0;
				}
			}).asQueryableState("hakuna", valueState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		// Now query
		long expected = numElements;

		// query once
		client.getKvState(
				jobId,
				queryableState.getQueryableStateName(),
				0,
				VoidNamespace.INSTANCE,
				BasicTypeInfo.INT_TYPE_INFO,
				VoidNamespaceTypeInfo.INSTANCE,
				valueState);

		cluster.submitJobDetached(jobGraph);

		executeValueQuery(deadline, client, jobId, "hakuna", valueState, expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			CompletableFuture<CancellationSuccess> cancellation = FutureUtils.toJava(cluster
					.getLeaderGateway(deadline.timeLeft())
					.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
					.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class)));

			cancellation.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
		}
	}
}
 
Developer: axbaretto | Project: flink | Lines: 73 | Source: AbstractQueryableStateTestBase.java


Note: the org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setRestartStrategy examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and the copyright of each snippet remains with its original author. Consult the corresponding project's License before redistributing or reusing the code; do not reproduce this article without permission.