

Java ParameterTool.fromArgs Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.api.java.utils.ParameterTool.fromArgs. If you are unsure what ParameterTool.fromArgs does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.api.java.utils.ParameterTool.


The following shows 15 code examples of the ParameterTool.fromArgs method, ordered by popularity.
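Before the examples, here is a minimal sketch of what ParameterTool.fromArgs does: it parses "--key value" (or "-key value") pairs from a String[] into a lookup object. The class name and argument values below are hypothetical, chosen only for illustration.

import org.apache.flink.api.java.utils.ParameterTool;

public class FromArgsSketch {
    public static void main(String[] args) {
        // Equivalent to launching with: --input /tmp/ratings.csv --parallelism 4
        ParameterTool params = ParameterTool.fromArgs(
                new String[]{"--input", "/tmp/ratings.csv", "--parallelism", "4"});

        String input = params.getRequired("input");         // throws RuntimeException if absent
        int parallelism = params.getInt("parallelism", 1);  // falls back to the default
        boolean verbose = params.has("verbose");            // false here: flag was not passed

        System.out.println(input + ", " + parallelism + ", " + verbose);
    }
}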

Example 1: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {
    ParameterTool tool = ParameterTool.fromArgs(args);

    Properties p = new Properties();
    p.setProperty(TwitterSource.CONSUMER_KEY, tool.getRequired("consumer.key"));
    p.setProperty(TwitterSource.CONSUMER_SECRET, tool.getRequired("consumer.secret"));
    p.setProperty(TwitterSource.TOKEN, tool.getRequired("token"));
    p.setProperty(TwitterSource.TOKEN_SECRET, tool.getRequired("token.secret"));

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

    DataStream<String> streamSource = env.addSource(new TwitterSource(p));

    // Every 10 minutes, compute the most-trending hashtag over the last hour of tweets.
    SlidingEventTimeWindows windowSpec = SlidingEventTimeWindows.of(Time.hours(1), Time.minutes(10));

    streamSource.flatMap(hashTagExtraction())
            .keyBy(0)
            .sum(1)
            .windowAll(windowSpec)
            .maxBy(1)
            .writeAsText("file:///Users/abij/projects/tryouts/flink-streaming-xke/hot-hashtags.log", OVERWRITE);

    env.execute("Twitter Stream");
}
 
Author: godatadriven, Project: flink-streaming-xke, Lines: 27, Source: TwitterStreamProcessing.java
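Because the example uses getRequired for all four Twitter credentials, the job fails fast when any flag is missing. A tiny sketch of that behavior (argument values hypothetical):

ParameterTool tool = ParameterTool.fromArgs(new String[]{"--consumer.key", "abc"});
tool.getRequired("token"); // throws a RuntimeException naming the missing key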

Example 2: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.setParallelism(1);

	Properties kinesisConsumerConfig = new Properties();
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_REGION, pt.getRequired("region"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, pt.getRequired("accesskey"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, pt.getRequired("secretkey"));

	DataStream<String> kinesis = see.addSource(new FlinkKinesisConsumer<>(
		"flink-test",
		new SimpleStringSchema(),
		kinesisConsumerConfig));

	kinesis.print();

	see.execute();
}
 
Author: axbaretto, Project: flink, Lines: 21, Source: ConsumeFromKinesis.java

Example 3: parseParams

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public boolean parseParams(String[] args) throws Exception {
	boolean wasHelpPrinted = false;
	ParameterTool parameter = ParameterTool.fromArgs(args);

	if(parameter.has("help")){
		printHelpMessage();
		wasHelpPrinted = true;
	}
	else {
		try {
			dataFilePath = parameter.getRequired("input");
		}
		catch(Exception e) {
			printHelpMessage();
			throw e;
		}

	}

	return wasHelpPrinted;
}
 
Author: dineshtrivedi, Project: flink-java-project, Lines: 22, Source: TaxiRideCleansingParameterParser.java
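A hypothetical call site for this parser, using the class and field names from the source file above:

TaxiRideCleansingParameterParser parser = new TaxiRideCleansingParameterParser();
boolean helpWasPrinted = parser.parseParams(new String[]{"--input", "/data/taxi/rides.csv"});
if (!helpWasPrinted) {
    // dataFilePath has been set from --input; continue with the cleansing job ...
}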

Example 4: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {

        // parse parameters
        ParameterTool params = ParameterTool.fromArgs(args);
        // path to ratings.csv file
        String ratingsCsvPath = params.getRequired("input");

        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSource<String> file = env.readTextFile(ratingsCsvPath);
        file.flatMap(new ExtractRating())
            .groupBy(0)
            // .reduceGroup(new SumRatingCount())
            .sum(1)
            .print();
    }
 
Author: mushketyk, Project: flink-examples, Lines: 17, Source: RatingsDistribution.java

Example 5: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment();

        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        DataStream<String> messageStream = env
                .addSource(new FlinkKafkaConsumer082<>(
                parameterTool.getRequired("topic"),
                new SimpleStringSchema(),
                parameterTool.getProperties()));

        // print() writes the contents of the stream to the TaskManager's standard-out stream.
        // The rebalance() call repartitions the data so that all machines
        // see the messages (for example, when "num kafka partitions" < "num flink operators").
        messageStream.rebalance().map(new MapFunction<String, String>() {
            private static final long serialVersionUID = -6867736771747690202L;

            @Override
            public String map(String value) throws Exception {
                return "Kafka and Flink says: " + value;
            }
        }).print();

        env.execute();
    }
 
Author: jdc91, Project: StreamProcessingInfrastructure, Lines: 30, Source: FlinkPoc.java
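The key detail in this example is parameterTool.getProperties(): every parsed flag is copied into a java.util.Properties object, so Kafka settings such as bootstrap.servers and group.id can be passed straight from the command line to the consumer. A minimal sketch, with hypothetical broker and group values:

import java.util.Properties;
import org.apache.flink.api.java.utils.ParameterTool;

public class KafkaPropsSketch {
    public static void main(String[] args) {
        // Equivalent to: --topic events --bootstrap.servers broker:9092 --group.id demo
        ParameterTool pt = ParameterTool.fromArgs(new String[]{
                "--topic", "events",
                "--bootstrap.servers", "broker:9092",
                "--group.id", "demo"});
        Properties kafkaProps = pt.getProperties();
        // Contains bootstrap.servers and group.id, exactly what the
        // FlinkKafkaConsumer constructor in the example above expects.
        System.out.println(kafkaProps.getProperty("bootstrap.servers"));
    }
}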

Example 6: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {
    ParameterTool parameters = ParameterTool.fromArgs(args);

    StreamExecutionEnvironment environment = new ParametersExecutionEnvironmentFactory(parameters)
            .createExecutionEnvironment();

    DataStream<String> stream = environment
            .addSource(new FlinkKafkaConsumerFactory(parameters).createConsumer())
            .name("Consume messages from Kafka.")
            .rebalance();

    DataStream<Tuple2<String, String>> events = stream
            .flatMap(new EventJsonFlapMap())
            .name("Run Eventor, serialize event and produce tuple.");

    ElasticsearchSink sink = new ElasticsearchEventSinkFactory(parameters).create();

    events.addSink(sink)
            .name("Push event to Elasticsearch.");

    environment.execute("Ingest events from Kafka and index in Elasticsearch.");
}
 
Author: webaio, Project: processor, Lines: 23, Source: IngestEventsElasticsearchJob.java

Example 7: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {
	ParameterTool params = ParameterTool.fromArgs(args);

	// define the dataflow
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(2);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(10, 1000));
	env.readFileStream("input/", 60000, FileMonitoringFunction.WatchType.ONLY_NEW_FILES)
		.addSink(new DiscardingSink<String>());

	// generate a job graph
	final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
	File jobGraphFile = new File(params.get("output", "job.graph"));
	try (FileOutputStream output = new FileOutputStream(jobGraphFile);
		ObjectOutputStream obOutput = new ObjectOutputStream(output)) {
		obOutput.writeObject(jobGraph);
	}
}
 
Author: axbaretto, Project: flink, Lines: 19, Source: StreamingNoop.java
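The job graph above is written with plain Java serialization, so it can be read back the same way. A hypothetical counterpart that deserializes the file produced by this job:

import java.io.File;
import java.io.FileInputStream;
import java.io.ObjectInputStream;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class ReadJobGraph {
    public static void main(String[] args) throws Exception {
        File jobGraphFile = new File("job.graph"); // default output name used above
        try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(jobGraphFile))) {
            JobGraph restored = (JobGraph) in.readObject();
            System.out.println("Restored job: " + restored.getJobID());
        }
    }
}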

Example 8: testControllerUri

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
@Test
public void testControllerUri() throws IOException {
    FlinkPravegaParams helper = new FlinkPravegaParams(ParameterTool.fromArgs(
        new String[] { "--controller", "tcp://controller.pravega.l4lb.thisdcos.directory:9091"}
    ));
    assertEquals(URI.create("tcp://controller.pravega.l4lb.thisdcos.directory:9091"), helper.getControllerUri());
}
 
Author: pravega, Project: flink-connectors, Lines: 8, Source: FlinkPravegaParamsTest.java

Example 9: testStreamParam

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
@Test
public void testStreamParam() {
    String input = "testScope/exampleStream";
    FlinkPravegaParams helper = new FlinkPravegaParams(ParameterTool.fromArgs(
        new String[] { "--input", input}
    ));
    assertEquals(input, helper.getStreamFromParam("input", "default/value").toString());

    helper = new FlinkPravegaParams(ParameterTool.fromMap(new HashMap<>()));
    assertEquals("default/value", helper.getStreamFromParam("input", "default/value").toString());
}
 
Author: pravega, Project: flink-connectors, Lines: 12, Source: FlinkPravegaParamsTest.java

Example 10: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {

        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(3);

        DataStream<String> simpleStringStream = env.addSource(new EventsGenerator());

        Properties configProps = new Properties();
        configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
        configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
        configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
        configProps.put(ConfigConstants.LOG_PROJECT, sProject);
        configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);

        FlinkLogProducer<String> logProducer = new FlinkLogProducer<String>(new SimpleLogSerializer(), configProps);
        logProducer.setCustomPartitioner(new LogPartitioner<String>() {
            @Override
            public String getHashKey(String element) {
                try {
                    // Hash each record with MD5 and left-pad the hex digest to 32 chars,
                    // because BigInteger.toString(16) drops leading zeros.
                    MessageDigest md = MessageDigest.getInstance("MD5");
                    md.update(element.getBytes());
                    String hash = new BigInteger(1, md.digest()).toString(16);
                    while (hash.length() < 32) {
                        hash = "0" + hash;
                    }
                    return hash;
                } catch (NoSuchAlgorithmException e) {
                    // MD5 is available on every standard JVM, so this is effectively unreachable.
                }
                // Fallback hash key used only if digesting fails.
                return "0000000000000000000000000000000000000000000000000000000000000000";
            }
        });
        simpleStringStream.addSink(logProducer);

        env.execute("flink log producer");
    }
 
Author: aliyun, Project: aliyun-log-flink-connector, Lines: 36, Source: ProducerSample.java

Example 11: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {

        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(2);
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        env.setStateBackend(new FsStateBackend("file:///Users/zhouzhou/Binary/flink-1.3.2/testcheckpoints/"));
        RawLogGroupListDeserializer deserializer = new RawLogGroupListDeserializer();
        Properties configProps = new Properties();
        configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
        configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
        configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
        configProps.put(ConfigConstants.LOG_PROJECT, sProject);
        configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);
        configProps.put(ConfigConstants.LOG_MAX_NUMBER_PER_FETCH, "10");
        configProps.put(ConfigConstants.LOG_CONSUMER_BEGIN_POSITION, Consts.LOG_FROM_CHECKPOINT);
        configProps.put(ConfigConstants.LOG_CONSUMERGROUP, "23_ots_sla_etl_product");
        DataStream<RawLogGroupList> logTestStream = env.addSource(
                new FlinkLogConsumer<RawLogGroupList>(deserializer, configProps)
        );

        logTestStream.writeAsText("/Users/zhouzhou/Binary/flink-1.3.2/data/newb.txt." + System.nanoTime());
        env.execute("flink log connector");
    }
 
Author: aliyun, Project: aliyun-log-flink-connector, Lines: 29, Source: ConsumerSample.java

Example 12: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {
    // Read parameters from command line
    final ParameterTool params = ParameterTool.fromArgs(args);

    if(params.getNumberOfParameters() < 4) {
        System.out.println("\nUsage: FlinkReadKafka --read-topic <topic> --write-topic <topic> --bootstrap.servers <kafka brokers> --group.id <groupid>");
        return;
    }


    // setup streaming environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
    env.enableCheckpointing(300000); // 300 seconds
    env.getConfig().setGlobalJobParameters(params);

    DataStream<String> messageStream = env
            .addSource(new FlinkKafkaConsumer010<>(
                    params.getRequired("read-topic"),
                    new SimpleStringSchema(),
                    params.getProperties())).name("Read from Kafka");

    // setup table environment
    StreamTableEnvironment sTableEnv = TableEnvironment.getTableEnvironment(env);


    // Write JSON payload back to Kafka topic
    messageStream.addSink(new FlinkKafkaProducer010<>(
                params.getRequired("write-topic"),
                new SimpleStringSchema(),
                params.getProperties())).name("Write To Kafka");

    env.execute("FlinkReadWriteKafka");
}
 
Author: kgorman, Project: TrafficAnalyzer, Lines: 35, Source: FlinkReadWriteKafka.java
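Because the job calls env.getConfig().setGlobalJobParameters(params), the parameters are visible in the Flink web UI and can also be read back inside any rich function at runtime. A sketch of that pattern; the TagMapper class and its "tag" parameter are hypothetical:

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;

public class TagMapper extends RichMapFunction<String, String> {
    private String tag;

    @Override
    public void open(Configuration config) {
        // Recover the ParameterTool registered via setGlobalJobParameters().
        ParameterTool params = (ParameterTool)
                getRuntimeContext().getExecutionConfig().getGlobalJobParameters();
        tag = params.get("tag", "untagged");
    }

    @Override
    public String map(String value) {
        return tag + ": " + value;
    }
}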

Example 13: parseConfigurations

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
private void parseConfigurations(String[] args) {

		LOG.info("ApplicationMain Main.. Arguments: {}", Arrays.asList(args));

		ParameterTool parameterTool = ParameterTool.fromArgs(args);
		LOG.info("Parameter Tool: {}", parameterTool.toMap());

		if(parameterTool.getNumberOfParameters() != 2) {
			printUsage();
			System.exit(1);
		}

		String configDirPath = parameterTool.getRequired("configDir");
		try {
			byte[] configurationData = Files.readAllBytes(Paths.get(configDirPath + File.separator + configFile));
			String jsonData = new String(configurationData);
			LOG.info("App Configurations raw data: {}", jsonData);
			Gson gson = new Gson();
			appConfiguration = gson.fromJson(jsonData, AppConfiguration.class);
		} catch (IOException e) {
			LOG.error("Could not read {}", configFile, e);
			System.exit(1);
		}

		runMode = parameterTool.getInt("mode");

		pravega = new FlinkPravegaParams(ParameterTool.fromArgs(args));
	}
 
Author: pravega, Project: pravega-samples, Lines: 29, Source: PipelineRunner.java

Example 14: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(String[] args) throws Exception {
    ParameterTool params = ParameterTool.fromArgs(args);
    FlinkPravegaParams helper = new FlinkPravegaParams(params);
    StreamId stream = helper.createStreamFromParam("input", "examples/turbineHeatTest");

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // 1. read and decode the sensor events from a Pravega stream
    long startTime = params.getLong("start", 0L);
    FlinkPravegaReader<String> reader = helper.newReader(stream, startTime, String.class);
    DataStream<SensorEvent> events = env.addSource(reader, "input").map(new SensorMapper()).name("events");

    // 2. extract timestamp information to support 'event-time' processing
    SingleOutputStreamOperator<SensorEvent> timestamped = events.assignTimestampsAndWatermarks(
            new BoundedOutOfOrdernessTimestampExtractor<SensorEvent>(Time.seconds(10)) {
        @Override
        public long extractTimestamp(SensorEvent element) {
            return element.getTimestamp();
        }
    });
    timestamped.print();

    // 3. summarize the temperature data for each sensor
    SingleOutputStreamOperator<SensorAggregate> summaries = timestamped
            .keyBy("sensorId")
            .window(TumblingEventTimeWindows.of(Time.days(1), Time.hours(8)))
            .fold(null, new SensorAggregator()).name("summaries");

    // 4. save to HDFS and print to stdout.  Refer to the TaskManager's 'Stdout' view in the Flink UI.
    summaries.print().name("stdout");
    if (params.has("output")) {
        summaries.writeAsCsv(params.getRequired("output"), FileSystem.WriteMode.OVERWRITE);
    }

    env.execute("TurbineHeatProcessor_" + stream);
}
 
Author: pravega, Project: pravega-samples, Lines: 38, Source: TurbineHeatProcessor.java

Example 15: main

import org.apache.flink.api.java.utils.ParameterTool; // import required by this method
public static void main(final String[] args) throws Exception {

		final ParameterTool params = ParameterTool.fromArgs(args);

		final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		// make parameters available in the web interface
		env.getConfig().setGlobalJobParameters(params);

		// get the data set
		final DataSet<StringTriple> file = getDataSet(env, params);

		// filter lines with empty fields
		final DataSet<StringTriple> filteredLines = file.filter(new EmptyFieldFilter());

		// Here, we could do further processing with the filtered lines...
		JobExecutionResult result;
		// output the filtered lines
		if (params.has("output")) {
			filteredLines.writeAsCsv(params.get("output"));
			// execute program
			result = env.execute("Accumulator example");
		} else {
			System.out.println("Printing result to stdout. Use --output to specify output path.");
			filteredLines.print();
			result = env.getLastJobExecutionResult();
		}

		// get the accumulator result via its registration key
		final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR);
		System.out.format("Number of detected empty fields per column: %s\n", emptyFields);
	}
 
Author: axbaretto, Project: flink, Lines: 33, Source: EmptyFieldsCountAccumulator.java


Note: the org.apache.flink.api.java.utils.ParameterTool.fromArgs examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, so consult each project's License before distributing or using the code. Do not republish without permission.