

Java StreamExecutionEnvironment.setParallelism Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setParallelism. If you are wondering what exactly StreamExecutionEnvironment.setParallelism does, how to use it, or what example usages look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.


Below are 15 code examples of the StreamExecutionEnvironment.setParallelism method, sorted by popularity by default. A minimal sketch of the basic call pattern is given first.
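
Minimal sketch: the following small, self-contained example was written for this overview and is not taken from any of the projects below; the class name ParallelismExample and the job name are illustrative. It shows the typical call pattern: obtain a StreamExecutionEnvironment, call setParallelism to set the default parallelism for every operator in the job, build the pipeline, and execute. An operator-level setParallelism call still takes precedence over this environment-level default.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ParallelismExample {
    public static void main(String[] args) throws Exception {
        // Obtain the execution environment (local or remote, depending on how the job is launched).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Set the default parallelism for all operators of this job to 4; individual operators
        // can still override it with their own setParallelism(...) call.
        env.setParallelism(4);

        env.fromElements(1, 2, 3, 4, 5)
                .map(new MapFunction<Integer, Integer>() {
                    @Override
                    public Integer map(Integer value) {
                        return value * 2;
                    }
                })
                .print();

        env.execute("setParallelism example");
    }
}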

Example 1: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);

        final Hashtable<String, String> jmsEnv = new Hashtable<>();
        jmsEnv.put(InitialContext.INITIAL_CONTEXT_FACTORY, "com.solacesystems.jndi.SolJNDIInitialContextFactory");
        jmsEnv.put(InitialContext.PROVIDER_URL, "smf://192.168.56.101");
        jmsEnv.put(Context.SECURITY_PRINCIPAL, "[email protected]_vpn");
        jmsEnv.put(Context.SECURITY_CREDENTIALS, "password");

        env.addSource(new JMSTopicSource<String>(jmsEnv,
                "flink_cf",
                "flink/topic",
                new JMSTextTranslator()))
                .print();

        env.execute();
    }
 
Developer: SolaceLabs, Project: solace-integration-guides, Lines of code: 20, Source: BasicTopicStreamingSample.java

Example 2: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);

        final Hashtable<String, String> jmsEnv = new Hashtable<>();
        jmsEnv.put(InitialContext.INITIAL_CONTEXT_FACTORY, "com.solacesystems.jndi.SolJNDIInitialContextFactory");
        jmsEnv.put(InitialContext.PROVIDER_URL, "smf://192.168.56.101");
        jmsEnv.put(Context.SECURITY_PRINCIPAL, "[email protected]_vpn");
        jmsEnv.put(Context.SECURITY_CREDENTIALS, "password");

        env.addSource(new JMSQueueSource<String>(jmsEnv,
                "flink_cf",
                "flink_queue",
                new JMSTextTranslator()))
                .print();

        env.execute();
    }
 
Developer: SolaceLabs, Project: solace-integration-guides, Lines of code: 20, Source: BasicQueueStreamingSample.java

Example 3: publishUsingFlinkConnector

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private void publishUsingFlinkConnector(AppConfiguration appConfiguration) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	StreamId streamId = getStreamId();
	FlinkPravegaWriter<Event> writer = pravega.newWriter(streamId, Event.class, new EventRouter());

	if(appConfiguration.getProducer().isControlledEnv()) {
		if(!(env instanceof LocalStreamEnvironment)) {
			throw new Exception("Use a local Flink environment or set controlledEnv to false in app.json.");
		}
		// setting this to a single instance since the controlled run allows user input to trigger error events
		env.setParallelism(1);
		long latency = appConfiguration.getProducer().getLatencyInMilliSec();
		int capacity = appConfiguration.getProducer().getCapacity();
		ControlledSourceContextProducer controlledSourceContextProducer = new ControlledSourceContextProducer(capacity, latency);
		env.addSource(controlledSourceContextProducer).name("EventSource").addSink(writer).name("Pravega-" + streamId.getName());
	} else {
		SourceContextProducer sourceContextProducer = new SourceContextProducer(appConfiguration);
		env.addSource(sourceContextProducer).name("EventSource").addSink(writer).name("Pravega-" + streamId.getName());
	}

	env.execute(appConfiguration.getName()+"-producer");
}
 
Developer: pravega, Project: nautilus-samples, Lines of code: 24, Source: PravegaEventPublisher.java

Example 4: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(4);

		DataStream<Tuple2<Long, Long>> stream = env.addSource(new DataSource());

		stream
			.keyBy(0)
			.timeWindow(Time.of(2500, MILLISECONDS), Time.of(500, MILLISECONDS))
			.reduce(new SummingReducer())

			// alternative: use an apply function which does not pre-aggregate
//			.keyBy(new FirstFieldKeyExtractor<Tuple2<Long, Long>, Long>())
//			.window(Time.of(2500, MILLISECONDS), Time.of(500, MILLISECONDS))
//			.apply(new SummingWindowFunction())

			.addSink(new SinkFunction<Tuple2<Long, Long>>() {
				@Override
				public void invoke(Tuple2<Long, Long> value) {
				}
			});

		env.execute();
	}
 
Developer: axbaretto, Project: flink, Lines of code: 26, Source: GroupedProcessingTimeWindowExample.java

Example 5: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.setParallelism(1);

	DataStream<String> simpleStringStream = see.addSource(new EventsGenerator());

	Properties kinesisProducerConfig = new Properties();
	kinesisProducerConfig.setProperty(AWSConfigConstants.AWS_REGION, pt.getRequired("region"));
	kinesisProducerConfig.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, pt.getRequired("accessKey"));
	kinesisProducerConfig.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, pt.getRequired("secretKey"));

	FlinkKinesisProducer<String> kinesis = new FlinkKinesisProducer<>(
			new SimpleStringSchema(), kinesisProducerConfig);

	kinesis.setFailOnError(true);
	kinesis.setDefaultStream("flink-test");
	kinesis.setDefaultPartition("0");

	simpleStringStream.addSink(kinesis);

	see.execute();
}
 
Developer: axbaretto, Project: flink, Lines of code: 25, Source: ProduceIntoKinesis.java

Example 6: testDisabledTimestamps

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * This checks whether timestamps are properly ignored when they are disabled.
 */
@Test
public void testDisabledTimestamps() throws Exception {
	final int numElements = 10;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
	env.setParallelism(PARALLELISM);
	env.getConfig().disableSysoutLogging();

	DataStream<Integer> source1 = env.addSource(new MyNonWatermarkingSource(numElements));
	DataStream<Integer> source2 = env.addSource(new MyNonWatermarkingSource(numElements));

	source1
			.map(new IdentityMap())
			.connect(source2).map(new IdentityCoMap())
			.transform("Custom Operator", BasicTypeInfo.INT_TYPE_INFO, new DisabledTimestampCheckingOperator())
			.addSink(new DiscardingSink<Integer>());

	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines of code: 25, Source: TimestampITCase.java

Example 7: runPartitioningProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private static void runPartitioningProgram(int jobManagerPort, int parallelism) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
	env.setParallelism(parallelism);
	env.getConfig().enableObjectReuse();

	env.setBufferTimeout(5L);
	env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);

	env
		.addSource(new TimeStampingSource())
		.map(new IdMapper<Tuple2<Long, Long>>())
		.keyBy(0)
		.addSink(new TimestampingSink());

	env.execute("Partitioning Program");
}
 
Developer: axbaretto, Project: flink, Lines of code: 17, Source: StreamingScalabilityAndLatency.java

Example 8: testTimestampHandling

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * This checks whether timestamps are properly assigned at the sources and handled in
 * network transmission and between chained operators when timestamps are enabled.
 */
@Test
public void testTimestampHandling() throws Exception {
	final int numElements = 10;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	env.setParallelism(PARALLELISM);
	env.getConfig().disableSysoutLogging();

	DataStream<Integer> source1 = env.addSource(new MyTimestampSource(0L, numElements));
	DataStream<Integer> source2 = env.addSource(new MyTimestampSource(0L, numElements));

	source1
			.map(new IdentityMap())
			.connect(source2).map(new IdentityCoMap())
			.transform("Custom Operator", BasicTypeInfo.INT_TYPE_INFO, new TimestampCheckingOperator())
			.addSink(new DiscardingSink<Integer>());

	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines of code: 26, Source: TimestampITCase.java

Example 9: testEventTimeSourceWithProcessingTime

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * This verifies that an event time source works when setting stream time characteristic to
 * processing time. In this case, the watermarks should just be swallowed.
 */
@Test
public void testEventTimeSourceWithProcessingTime() throws Exception {
	StreamExecutionEnvironment env =
			StreamExecutionEnvironment.getExecutionEnvironment();

	env.setParallelism(2);
	env.getConfig().disableSysoutLogging();
	env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	DataStream<Integer> source1 = env.addSource(new MyTimestampSource(0, 10));

	source1
		.map(new IdentityMap())
		.transform("Watermark Check", BasicTypeInfo.INT_TYPE_INFO, new CustomOperator(false));

	env.execute();

	// verify that we don't get any watermarks, the source is used as watermark source in
	// other tests, so it normally emits watermarks
	Assert.assertTrue(CustomOperator.finalWatermarks[0].size() == 0);
}
 
Developer: axbaretto, Project: flink, Lines of code: 26, Source: TimestampITCase.java

Example 10: testNodeHashIdenticalSources

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * Tests that there are no collisions with two identical sources.
 *
 * <pre>
 * [ (src0) ] --\
 *               +--> [ (sink) ]
 * [ (src1) ] --/
 * </pre>
 */
@Test
public void testNodeHashIdenticalSources() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
	env.setParallelism(4);
	env.disableOperatorChaining();

	DataStream<String> src0 = env.addSource(new NoOpSourceFunction());
	DataStream<String> src1 = env.addSource(new NoOpSourceFunction());

	src0.union(src1).addSink(new NoOpSinkFunction());

	JobGraph jobGraph = env.getStreamGraph().getJobGraph();

	List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertTrue(vertices.get(0).isInputVertex());
	assertTrue(vertices.get(1).isInputVertex());

	assertNotNull(vertices.get(0).getID());
	assertNotNull(vertices.get(1).getID());

	assertNotEquals(vertices.get(0).getID(), vertices.get(1).getID());
}
 
Developer: axbaretto, Project: flink, Lines of code: 32, Source: StreamingJobGraphGeneratorNodeHashTest.java

Example 11: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(20);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines of code: 27, Source: RollingSinkFaultToleranceITCase.java

Example 12: shouldSelectFromStreamUsingAnonymousClassSelect

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
@SuppressWarnings("Convert2Lambda")
public void shouldSelectFromStreamUsingAnonymousClassSelect() throws Exception {
    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    DataStream<TestEvent> dataStream = executionEnvironment.fromElements(new TestEvent("peter", 10), new TestEvent("alex", 25), new TestEvent("maria", 30));

    EsperStream<TestEvent> esperStream = Esper.query(dataStream, "select name, age from TestEvent");

    DataStream<TestEvent> resultStream = esperStream.select(new EsperSelectFunction<TestEvent>() {
        private static final long serialVersionUID = 8802852465465541287L;

        @Override
        public TestEvent select(EventBean eventBean) throws Exception {
            String name = (String) eventBean.get("name");
            int age = (int) eventBean.get("age");
            return new TestEvent(name, age);
        }
    });

    resultStream.addSink(new SinkFunction<TestEvent>() {

        private static final long serialVersionUID = -8260794084029816089L;

        @Override
        public void invoke(TestEvent testEvent) throws Exception {
            System.err.println(testEvent);
            result.add(testEvent);
        }
    });

    executionEnvironment.execute("test-2");

    assertThat(result, is(notNullValue()));
    assertThat(result.size(), is(3));
}
 
Developer: phil3k3, Project: flink-esper, Lines of code: 38, Source: EsperQueryTest.java

Example 13: shouldSelectFromStreamUsingLambdaSelect

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
@SuppressWarnings("Convert2Lambda")
public void shouldSelectFromStreamUsingLambdaSelect() throws Exception {

    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    DataStream<TestEvent> dataStream = executionEnvironment.fromElements(new TestEvent("peter1", 10), new TestEvent("alex1", 25), new TestEvent("maria1", 30));

    EsperStream<TestEvent> esperStream = Esper.query(dataStream, "select name, age from TestEvent");

    DataStream<TestEvent> resultStream = esperStream.select((EsperSelectFunction<TestEvent>) collector -> {
        String name = (String) collector.get("name");
        int age = (int) collector.get("age");
        return new TestEvent(name, age);
    });

    resultStream.addSink(new SinkFunction<TestEvent>() {

        private static final long serialVersionUID = 5588530728493738002L;

        @Override
        public void invoke(TestEvent testEvent) throws Exception {
            result.add(testEvent);
        }
    });

    executionEnvironment.execute("test-1");

    assertThat(result, is(notNullValue()));
    assertThat(result.size(), is(3));
}
 
Developer: phil3k3, Project: flink-esper, Lines of code: 33, Source: EsperQueryTest.java

Example 14: shouldSelectFromStringDataStream

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
@SuppressWarnings("Convert2Lambda")
public void shouldSelectFromStringDataStream() throws Exception {
    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    List<String> expectedValues = Arrays.asList("first", "second");
    DataStream<String> dataStream = executionEnvironment.fromCollection(expectedValues);

    EsperStream<String> esperStream = Esper.query(dataStream, "select bytes from String");

    DataStream<String> resultStream = esperStream.select((EsperSelectFunction<String>) collector -> {
        byte[] bytes = (byte[]) collector.get("bytes");
        return new String(bytes);
    });

    resultStream.addSink(new SinkFunction<String>() {

        private static final long serialVersionUID = 284955963055337762L;

        @Override
        public void invoke(String testEvent) throws Exception {
            System.err.println(testEvent);
            stringResult.add(testEvent);
        }
    });

    executionEnvironment.execute("test-2");

    assertThat(stringResult, is(notNullValue()));
    assertThat(stringResult.size(), is(2));
    assertThat(stringResult, is(expectedValues));
}
 
Developer: phil3k3, Project: flink-esper, Lines of code: 34, Source: EsperQueryTest.java

Example 15: testEsperPattern

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
public void testEsperPattern() throws Exception {
    StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    executionEnvironment.setParallelism(1);

    List<ComplexEvent> expectedValues = Lists.newArrayList();
    ComplexEvent complexEvent = new ComplexEvent(Event.start(), Event.end());
    expectedValues.add(complexEvent);

    List<Event> events = Arrays.asList(complexEvent.getStartEvent(), complexEvent.getEndEvent());
    DataStream<Event> dataStream = executionEnvironment.fromCollection(events);

    EsperStream<Event> esperStream = Esper.pattern(dataStream, "every (A=Event(type='start') -> B=Event(type='end'))");

    DataStream<ComplexEvent> complexEventDataStream = esperStream.select(new EsperSelectFunction<ComplexEvent>() {
        @Override
        public ComplexEvent select(EventBean eventBean) throws Exception {
            return new ComplexEvent((Event) eventBean.get("A"), (Event) eventBean.get("B"));
        }
    });

    complexEventDataStream.addSink(new SinkFunction<ComplexEvent>() {
        @Override
        public void invoke(ComplexEvent value) throws Exception {
            System.err.println(value);
            resultingEvents.add(value);
        }
    });

    executionEnvironment.execute("test-2");

    assertThat(resultingEvents, is(expectedValues));
}
 
Developer: phil3k3, Project: flink-esper, Lines of code: 34, Source: EsperPatternTest.java


Note: The org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setParallelism method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; the copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.