

Java DataStream.getType Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.getType, gathered from open-source projects. If you are wondering how DataStream.getType is used in practice, or what it is good for, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.datastream.DataStream.


Below are 15 code examples of the DataStream.getType method, sorted by popularity by default.
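Before the examples, here is a minimal self-contained sketch of the typical pattern (the class name, element values, and printout are illustrative additions, not taken from any example below): getType() returns the TypeInformation that Flink inferred for the stream's elements, which the examples then feed into a serializer or a state descriptor.

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class GetTypeDemo {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<String> lines = env.fromElements("a", "b", "c");

		// getType() exposes the TypeInformation inferred for the stream's elements.
		TypeInformation<String> typeInfo = lines.getType();

		// The most common follow-up, used throughout the examples below:
		// derive a serializer from the type information.
		TypeSerializer<String> serializer = typeInfo.createSerializer(env.getConfig());

		System.out.println(typeInfo + " / " + serializer);
	}
}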

Example 1: addSink

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Writes a DataStream into a Cassandra database.
 *
 * @param input input DataStream
 * @param <IN>  input type
 * @return CassandraSinkBuilder, to further configure the sink
 */
public static <IN> CassandraSinkBuilder<IN> addSink(DataStream<IN> input) {
	TypeInformation<IN> typeInfo = input.getType();
	if (typeInfo instanceof TupleTypeInfo) {
		DataStream<Tuple> tupleInput = (DataStream<Tuple>) input;
		return (CassandraSinkBuilder<IN>) new CassandraTupleSinkBuilder<>(tupleInput, tupleInput.getType(), tupleInput.getType().createSerializer(tupleInput.getExecutionEnvironment().getConfig()));
	}
	if (typeInfo instanceof RowTypeInfo) {
		DataStream<Row> rowInput = (DataStream<Row>) input;
		return (CassandraSinkBuilder<IN>) new CassandraRowSinkBuilder(rowInput, rowInput.getType(), rowInput.getType().createSerializer(rowInput.getExecutionEnvironment().getConfig()));
	}
	if (typeInfo instanceof PojoTypeInfo) {
		return new CassandraPojoSinkBuilder<>(input, input.getType(), input.getType().createSerializer(input.getExecutionEnvironment().getConfig()));
	}
	if (typeInfo instanceof CaseClassTypeInfo) {
		DataStream<Product> productInput = (DataStream<Product>) input;
		return (CassandraSinkBuilder<IN>) new CassandraScalaProductSinkBuilder<>(productInput, productInput.getType(), productInput.getType().createSerializer(input.getExecutionEnvironment().getConfig()));
	}
	throw new IllegalArgumentException("No support for the type of the given DataStream: " + input.getType());
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 27, Source file: CassandraSink.java
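For context, a hedged usage sketch of the builder returned above; the host and the keyspace/table in the CQL query are placeholders, while setHost, setQuery, and build come from the Cassandra connector's public builder API:

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.cassandra.CassandraSink;

public class CassandraSinkUsage {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// A Tuple stream takes the CassandraTupleSinkBuilder branch in addSink above.
		DataStream<Tuple2<String, Long>> counts = env.fromElements(Tuple2.of("word", 1L));

		CassandraSink.addSink(counts)
			.setHost("127.0.0.1") // placeholder host
			.setQuery("INSERT INTO demo.wordcount (word, count) VALUES (?, ?);") // placeholder keyspace/table
			.build();

		env.execute("cassandra-sink-demo");
	}
}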

Example 2: registerStream

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Defines a Siddhi stream with a streamId, a source <code>DataStream</code>, and a stream schema.
 *
 * @param streamId Unique siddhi streamId
 * @param dataStream DataStream to bind to the siddhi stream.
 * @param fieldNames Siddhi stream schema field names
 */
public <T> void registerStream(final String streamId, DataStream<T> dataStream, String... fieldNames) {
	Preconditions.checkNotNull(streamId,"streamId");
	Preconditions.checkNotNull(dataStream,"dataStream");
	Preconditions.checkNotNull(fieldNames,"fieldNames");
	if (isStreamDefined(streamId)) {
		throw new DuplicatedStreamException("Input stream: " + streamId + " already exists");
	}
	dataStreams.put(streamId, dataStream);
	SiddhiStreamSchema<T> schema = new SiddhiStreamSchema<>(dataStream.getType(), fieldNames);
	schema.setTypeSerializer(schema.getTypeInfo().createSerializer(dataStream.getExecutionConfig()));
	dataStreamSchemas.put(streamId, schema);
}
 
Developer ID: haoch, Project: flink-siddhi, Lines of code: 20, Source file: SiddhiCEP.java
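A hedged usage sketch for registerStream; the stream id, field names, and query are illustrative, and the from(...).cql(...).returnAsMap(...) chain is assumed from the flink-siddhi README rather than from this page:

import java.util.Map;

import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.siddhi.SiddhiCEP;

public class SiddhiRegisterStreamUsage {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<Tuple3<Integer, String, Double>> input =
			env.fromElements(Tuple3.of(1, "event", 42.0));

		SiddhiCEP cep = SiddhiCEP.getSiddhiEnvironment(env);

		// registerStream binds the DataStream to a Siddhi stream; the schema
		// combines input.getType() with the field names given here.
		cep.registerStream("inputStream", input, "id", "name", "price");

		DataStream<Map<String, Object>> output = cep
			.from("inputStream")
			.cql("from inputStream select id, name, price insert into outputStream")
			.returnAsMap("outputStream");

		output.print();
		env.execute("siddhi-register-stream-demo");
	}
}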

Example 3: registerStream

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Defines a Siddhi stream with a streamId, a source <code>DataStream</code>, and a stream schema.
 *
 * @param streamId Unique siddhi streamId
 * @param dataStream DataStream to bind to the siddhi stream.
 * @param fieldNames Siddhi stream schema field names
 */
public <T> void registerStream(final String streamId, DataStream<T> dataStream, String... fieldNames) {
    Preconditions.checkNotNull(streamId,"streamId");
    Preconditions.checkNotNull(dataStream,"dataStream");
    Preconditions.checkNotNull(fieldNames,"fieldNames");
    if (isStreamDefined(streamId)) {
        throw new DuplicatedStreamException("Input stream: " + streamId + " already exists");
    }
    dataStreams.put(streamId, dataStream);
    SiddhiStreamSchema<T> schema = new SiddhiStreamSchema<>(dataStream.getType(), fieldNames);
    schema.setTypeSerializer(schema.getTypeInfo().createSerializer(dataStream.getExecutionConfig()));
    dataStreamSchemas.put(streamId, schema);
}
 
Developer ID: apache, Project: bahir-flink, Lines of code: 20, Source file: SiddhiCEP.java

Example 4: testValueState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Tests a simple queryable value state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds once each subtask index has been queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {

	final Deadline deadline = TEST_TIMEOUT.fromNow();
	final long numElements = 1024L;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStateBackend(stateBackend);
	env.setParallelism(maxParallelism);
	// Very important, because cluster is shared between tests and we
	// don't explicitly check that all slots are available before
	// submitting.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

	DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

	// Value state
	ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>("any", source.getType());

	source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
		private static final long serialVersionUID = 7662520075515707428L;

		@Override
		public Integer getKey(Tuple2<Integer, Long> value) {
			return value.f0;
		}
	}).asQueryableState("hakuna", valueState);

	try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(cluster, env, deadline)) {

		final JobID jobId = autoCancellableJob.getJobId();
		final JobGraph jobGraph = autoCancellableJob.getJobGraph();

		cluster.submitJobDetached(jobGraph);

		executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 45, Source file: AbstractQueryableStateTestBase.java
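This test and the later queryable-state tests share one pattern: source.getType() supplies the TypeInformation for the state descriptor, so the descriptor serializes exactly the stream's element type. A minimal standalone sketch of that pattern (the state name and element values are illustrative):

import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class StateDescriptorFromGetType {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<Tuple2<Integer, Long>> source =
			env.fromElements(Tuple2.of(0, 1L), Tuple2.of(0, 2L));

		// getType() hands the descriptor the stream's exact TypeInformation,
		// avoiding a hand-written TypeHint.
		ValueStateDescriptor<Tuple2<Integer, Long>> descriptor =
			new ValueStateDescriptor<>("any", source.getType());

		// Expose the keyed state for external queries, as in the test above.
		source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
			@Override
			public Integer getKey(Tuple2<Integer, Long> value) {
				return value.f0;
			}
		}).asQueryableState("hakuna", descriptor);

		env.execute("state-descriptor-demo");
	}
}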

Example 5: addSink

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Writes a DataStream into a Cassandra database.
 *
 * @param input input DataStream
 * @param <IN>  input type
 * @return CassandraSinkBuilder, to further configure the sink
 */
public static <IN, T extends Tuple> CassandraSinkBuilder<IN> addSink(DataStream<IN> input) {
	if (input.getType() instanceof TupleTypeInfo) {
		DataStream<T> tupleInput = (DataStream<T>) input;
		return (CassandraSinkBuilder<IN>) new CassandraTupleSinkBuilder<>(tupleInput, tupleInput.getType(), tupleInput.getType().createSerializer(tupleInput.getExecutionEnvironment().getConfig()));
	} else {
		return new CassandraPojoSinkBuilder<>(input, input.getType(), input.getType().createSerializer(input.getExecutionEnvironment().getConfig()));
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 16, Source file: CassandraSink.java

Example 6: HTMStream

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
HTMStream(final DataStream<T> input, NetworkFactory<T> networkFactory) {
    this.inferenceStreamBuilder = new InferenceStreamBuilder(input, networkFactory);
    this.inputType = input.getType();
}
 
Developer ID: htm-community, Project: flink-htm, Lines of code: 5, Source file: HTMStream.java

Example 7: testValueState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Tests a simple queryable value state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds once each subtask index has been queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final int numElements = 1024;

	final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStateBackend(stateBackend);
		env.setParallelism(maxParallelism);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

		DataStream<Tuple2<Integer, Long>> source = env
				.addSource(new TestAscendingValueSource(numElements));

		// Value state
		ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
				"any",
				source.getType());

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
				source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
					private static final long serialVersionUID = 7662520075515707428L;

					@Override
					public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
						return value.f0;
					}
				}).asQueryableState("hakuna", valueState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		cluster.submitJobDetached(jobGraph);

		// Now query
		long expected = numElements;

		executeQuery(deadline, client, jobId, "hakuna", valueState, expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			Future<CancellationSuccess> cancellation = cluster
					.getLeaderGateway(deadline.timeLeft())
					.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
					.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

			Await.ready(cancellation, deadline.timeLeft());
		}

		client.shutDown();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 68, Source file: AbstractQueryableStateITCase.java

Example 8: testQueryNonStartedJobState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Similar to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final int numElements = 1024;

	final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStateBackend(stateBackend);
		env.setParallelism(maxParallelism);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

		DataStream<Tuple2<Integer, Long>> source = env
			.addSource(new TestAscendingValueSource(numElements));

		// Value state
		ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
			"any",
			source.getType(),
			null);

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
			source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
				private static final long serialVersionUID = 7480503339992214681L;

				@Override
				public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
					return value.f0;
				}
			}).asQueryableState("hakuna", valueState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		// Now query
		long expected = numElements;

		// query once
		client.getKvState(
				jobId,
				queryableState.getQueryableStateName(),
				0,
				VoidNamespace.INSTANCE,
				BasicTypeInfo.INT_TYPE_INFO,
				VoidNamespaceTypeInfo.INSTANCE,
				valueState);

		cluster.submitJobDetached(jobGraph);

		executeQuery(deadline, client, jobId, "hakuna", valueState, expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			Future<CancellationSuccess> cancellation = cluster
				.getLeaderGateway(deadline.timeLeft())
				.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
				.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

			Await.ready(cancellation, deadline.timeLeft());
		}

		client.shutDown();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 77, Source file: AbstractQueryableStateITCase.java

Example 9: testReducingState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Tests a simple queryable reducing state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The reducing state instance sums these up. The test succeeds
 * after each subtask index is queried with result n*(n+1)/2.
 */
@Test
public void testReducingState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final int numElements = 1024;

	final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStateBackend(stateBackend);
		env.setParallelism(maxParallelism);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

		DataStream<Tuple2<Integer, Long>> source = env
				.addSource(new TestAscendingValueSource(numElements));

		// Reducing state
		ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
				new ReducingStateDescriptor<>(
						"any",
						new SumReduce(),
						source.getType());

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
				source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
					private static final long serialVersionUID = 8470749712274833552L;

					@Override
					public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
						return value.f0;
					}
				}).asQueryableState("jungle", reducingState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		cluster.submitJobDetached(jobGraph);

		// Wait until job is running

		// Now query
		long expected = numElements * (numElements + 1) / 2;

		executeQuery(deadline, client, jobId, "jungle", reducingState, expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			Future<CancellationSuccess> cancellation = cluster
					.getLeaderGateway(deadline.timeLeft())
					.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
					.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

			Await.ready(cancellation, deadline.timeLeft());
		}

		client.shutDown();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 72, Source file: AbstractQueryableStateITCase.java
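The SumReduce function referenced above is not shown on this page. The following is a hypothetical reconstruction, assuming it keeps the subtask index in f0 and sums the values in f1, consistent with the expected result n*(n+1)/2:

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical reconstruction of SumReduce: accumulate f1, keep f0.
public class SumReduce implements ReduceFunction<Tuple2<Integer, Long>> {
	private static final long serialVersionUID = 1L;

	@Override
	public Tuple2<Integer, Long> reduce(Tuple2<Integer, Long> value1, Tuple2<Integer, Long> value2) {
		return Tuple2.of(value1.f0, value1.f1 + value2.f1);
	}
}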

Example 10: testValueState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Tests a simple queryable value state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds once each subtask index has been queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final int numElements = 1024;

	final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(NUM_SLOTS);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

		DataStream<Tuple2<Integer, Long>> source = env
				.addSource(new TestAscendingValueSource(numElements));

		// Value state
		ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
				"any",
				source.getType());

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
				source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
					@Override
					public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
						return value.f0;
					}
				}).asQueryableState("hakuna", valueState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		cluster.submitJobDetached(jobGraph);

		// Now query
		long expected = numElements;

		executeValueQuery(deadline, client, jobId, queryableState,
			expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			Future<CancellationSuccess> cancellation = cluster
					.getLeaderGateway(deadline.timeLeft())
					.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
					.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

			Await.ready(cancellation, deadline.timeLeft());
		}

		client.shutDown();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 66, Source file: QueryableStateITCase.java

Example 11: testQueryNonStartedJobState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Similar to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final int numElements = 1024;

	final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(NUM_SLOTS);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

		DataStream<Tuple2<Integer, Long>> source = env
			.addSource(new TestAscendingValueSource(numElements));

		// Value state
		ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
			"any",
			source.getType(),
			null);

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
			source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
				@Override
				public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
					return value.f0;
				}
			}).asQueryableState("hakuna", valueState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		// Now query
		long expected = numElements;

		// query once
		client.getKvState(jobId, queryableState.getQueryableStateName(), 0,
			KvStateRequestSerializer.serializeKeyAndNamespace(
				0,
				queryableState.getKeySerializer(),
				VoidNamespace.INSTANCE,
				VoidNamespaceSerializer.INSTANCE));

		cluster.submitJobDetached(jobGraph);

		executeValueQuery(deadline, client, jobId, queryableState,
			expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			Future<CancellationSuccess> cancellation = cluster
				.getLeaderGateway(deadline.timeLeft())
				.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
				.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

			Await.ready(cancellation, deadline.timeLeft());
		}

		client.shutDown();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 73, Source file: QueryableStateITCase.java

Example 12: testReducingState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Tests a simple queryable reducing state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The reducing state instance sums these up. The test succeeds
 * after each subtask index is queried with result n*(n+1)/2.
 */
@Test
public void testReducingState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final int numElements = 1024;

	final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(NUM_SLOTS);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

		DataStream<Tuple2<Integer, Long>> source = env
				.addSource(new TestAscendingValueSource(numElements));

		// Reducing state
		ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
				new ReducingStateDescriptor<>(
						"any",
						new SumReduce(),
						source.getType());

		QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
				source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
					@Override
					public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
						return value.f0;
					}
				}).asQueryableState("jungle", reducingState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		cluster.submitJobDetached(jobGraph);

		// Wait until job is running

		// Now query
		long expected = numElements * (numElements + 1) / 2;

		executeValueQuery(deadline, client, jobId, queryableState,
			expected);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			Future<CancellationSuccess> cancellation = cluster
					.getLeaderGateway(deadline.timeLeft())
					.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
					.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

			Await.ready(cancellation, deadline.timeLeft());
		}

		client.shutDown();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 70, Source file: QueryableStateITCase.java

Example 13: testQueryNonStartedJobState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Similar to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {

	final Deadline deadline = TEST_TIMEOUT.fromNow();
	final long numElements = 1024L;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStateBackend(stateBackend);
	env.setParallelism(maxParallelism);
	// Very important, because cluster is shared between tests and we
	// don't explicitly check that all slots are available before
	// submitting.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

	DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

	ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
		"any", source.getType(), 	null);

	QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
			source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {

				private static final long serialVersionUID = 7480503339992214681L;

				@Override
				public Integer getKey(Tuple2<Integer, Long> value) {
					return value.f0;
				}
			}).asQueryableState("hakuna", valueState);

	try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(cluster, env, deadline)) {

		final JobID jobId = autoCancellableJob.getJobId();
		final JobGraph jobGraph = autoCancellableJob.getJobGraph();

		long expected = numElements;

		// query once
		client.getKvState(
				autoCancellableJob.getJobId(),
				queryableState.getQueryableStateName(),
				0,
				BasicTypeInfo.INT_TYPE_INFO,
				valueState);

		cluster.submitJobDetached(jobGraph);

		executeValueQuery(deadline, client, jobId, "hakuna", valueState, expected);
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 55, Source file: AbstractQueryableStateTestBase.java

Example 14: testValueStateDefault

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Tests a simple queryable value state instance with a default value
 * set. Each source emits (subtaskIndex, 0)..(subtaskIndex, numElements)
 * tuples; the key is mapped to 1, but key 0 is queried, which should throw
 * an {@link UnknownKeyOrNamespaceException}.
 *
 * @throws UnknownKeyOrNamespaceException thrown due querying a non-existent key
 */
@Test(expected = UnknownKeyOrNamespaceException.class)
public void testValueStateDefault() throws Throwable {

	final Deadline deadline = TEST_TIMEOUT.fromNow();
	final long numElements = 1024L;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStateBackend(stateBackend);
	env.setParallelism(maxParallelism);
	// Very important, because cluster is shared between tests and we
	// don't explicitly check that all slots are available before
	// submitting.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

	DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

	ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
			"any", source.getType(), 	Tuple2.of(0, 1337L));

	// only expose key "1"
	QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState = source.keyBy(
			new KeySelector<Tuple2<Integer, Long>, Integer>() {
				private static final long serialVersionUID = 4509274556892655887L;

				@Override
				public Integer getKey(Tuple2<Integer, Long> value) {
					return 1;
				}
			}).asQueryableState("hakuna", valueState);

	try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(cluster, env, deadline)) {

		final JobID jobId = autoCancellableJob.getJobId();
		final JobGraph jobGraph = autoCancellableJob.getJobGraph();

		cluster.submitJobDetached(jobGraph);

		// Now query
		int key = 0;
		CompletableFuture<ValueState<Tuple2<Integer, Long>>> future = getKvState(
				deadline,
				client,
				jobId,
				queryableState.getQueryableStateName(),
				key,
				BasicTypeInfo.INT_TYPE_INFO,
				valueState,
				true,
				executor);

		try {
			future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
		} catch (ExecutionException | CompletionException e) {
			// get() on a completedExceptionally future wraps the
			// exception in an ExecutionException.
			throw e.getCause();
		}
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 68, Source file: AbstractQueryableStateTestBase.java

Example 15: testValueState

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * Tests a simple queryable value state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds once each subtask index has been queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
	// Config
	final Deadline deadline = TEST_TIMEOUT.fromNow();

	final long numElements = 1024L;

	final QueryableStateClient client = new QueryableStateClient(
			"localhost",
			Integer.parseInt(QueryableStateOptions.PROXY_PORT_RANGE.defaultValue()));

	JobID jobId = null;
	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStateBackend(stateBackend);
		env.setParallelism(maxParallelism);
		// Very important, because cluster is shared between tests and we
		// don't explicitly check that all slots are available before
		// submitting.
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

		DataStream<Tuple2<Integer, Long>> source = env
				.addSource(new TestAscendingValueSource(numElements));

		// Value state
		ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
				"any",
				source.getType());

		source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
			private static final long serialVersionUID = 7662520075515707428L;

			@Override
			public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
				return value.f0;
			}
		}).asQueryableState("hakuna", valueState);

		// Submit the job graph
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();
		jobId = jobGraph.getJobID();

		cluster.submitJobDetached(jobGraph);

		executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
	} finally {
		// Free cluster resources
		if (jobId != null) {
			CompletableFuture<CancellationSuccess> cancellation = FutureUtils.toJava(cluster
					.getLeaderGateway(deadline.timeLeft())
					.ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
					.mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class)));

			cancellation.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
		}

		client.shutdown();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 66, Source file: AbstractQueryableStateITCase.java


Note: The org.apache.flink.streaming.api.datastream.DataStream.getType examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; the source code is copyrighted by its original authors. Refer to each project's License before distributing or using the code. Do not reproduce without permission.