

Java ParameterTool.getRequired Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.api.java.utils.ParameterTool.getRequired. If you are wondering what ParameterTool.getRequired does and how to use it, the curated examples below should help; you can also browse further usage examples of the enclosing class, org.apache.flink.api.java.utils.ParameterTool.


Below are 15 code examples of the ParameterTool.getRequired method, ordered by popularity by default.
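
Before the examples, it helps to know the method's contract: getRequired(key) returns the value for key and, unlike get(key), fails fast by throwing a RuntimeException when the argument was not supplied. A minimal sketch (class name and argument values are illustrative):

import org.apache.flink.api.java.utils.ParameterTool;

public class GetRequiredDemo {
    public static void main(String[] args) {
        // e.g. args = {"--input", "/data/ratings.csv"}
        ParameterTool params = ParameterTool.fromArgs(args);

        // returns the value when the key is present...
        String input = params.getRequired("input");

        // ...but throws a RuntimeException (roughly: "No data for required key 'input'")
        // when --input is missing, so misconfiguration surfaces immediately
        System.out.println("input = " + input);
    }
}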

Example 1: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

        // parse parameters
        ParameterTool params = ParameterTool.fromArgs(args);
        // path to ratings.csv file
        String ratingsCsvPath = params.getRequired("input");

        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSource<String> file = env.readTextFile(ratingsCsvPath);
        file.flatMap(new ExtractRating())
            .groupBy(0)
            // .reduceGroup(new SumRatingCount())
            .sum(1)
            .print();
    }
 
Developer: mushketyk, Project: flink-examples, Lines of code: 17, Source: RatingsDistribution.java
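
Note that ParameterTool.fromArgs parses --key value (or -key value) pairs from the program arguments, so Example 1 expects an invocation carrying --input followed by a path (the path below is hypothetical):

// program arguments as the Flink CLI would pass them to main()
String[] args = {"--input", "/path/to/ratings.csv"};
String input = ParameterTool.fromArgs(args).getRequired("input"); // "/path/to/ratings.csv"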

Example 2: parseParams

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public boolean parseParams(String[] args) throws Exception {
	boolean wasHelpPrinted = false;
	ParameterTool parameter = ParameterTool.fromArgs(args);

	if(parameter.has("help")){
		printHelpMessage();
		wasHelpPrinted = true;
	}
	else {
		try {
			dataFilePath = parameter.getRequired("input");
		}
		catch(Exception e) {
			printHelpMessage();
			throw e;
		}

	}

	return wasHelpPrinted;
}
 
Developer: dineshtrivedi, Project: flink-java-project, Lines of code: 22, Source: TaxiRideCleansingParameterParser.java

Example 3: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {
    ParameterTool tool = ParameterTool.fromArgs(args);

    String topic = tool.getRequired("kafka.topic");

    Properties kafkaConsumerProps = new Properties();
    kafkaConsumerProps.setProperty("bootstrap.servers", tool.getRequired("kafkabroker"));
    kafkaConsumerProps.setProperty("group.id", tool.getRequired("kafka.groupId"));
    kafkaConsumerProps.setProperty("zookeeper.connect", tool.get("zookeeper.host", "localhost:2181"));
    kafkaConsumerProps.setProperty("auto.offset.reset", tool.getBoolean("from-beginning", false) ? "smallest" : "largest");

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<String> textStream = env
            .addSource(new FlinkKafkaConsumer08<>(topic, new SimpleStringSchema(), kafkaConsumerProps));

    textStream.flatMap(new LineSplitter())
        .keyBy(0)
        .sum(1)
        .print();

    env.execute("WordCount from Kafka Example");
}
 
Developer: godatadriven, Project: flink-streaming-xke, Lines of code: 24, Source: KafkaStreamingWordCount.java

Example 4: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);
	String configDir = pt.getRequired("configDir");

	LOG.info("Loading configuration from {}", configDir);
	final Configuration flinkConfig = GlobalConfiguration.loadConfiguration(configDir);

	// run the history server
	SecurityUtils.install(new SecurityConfiguration(flinkConfig));

	try {
		SecurityUtils.getInstalledContext().runSecured(new Callable<Integer>() {
			@Override
			public Integer call() throws Exception {
				HistoryServer hs = new HistoryServer(flinkConfig);
				hs.run();
				return 0;
			}
		});
		System.exit(0);
	} catch (UndeclaredThrowableException ute) {
		Throwable cause = ute.getUndeclaredThrowable();
		LOG.error("Failed to run HistoryServer.", cause);
		cause.printStackTrace();
		System.exit(1);
	} catch (Exception e) {
		LOG.error("Failed to run HistoryServer.", e);
		e.printStackTrace();
		System.exit(1);
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 32, Source: HistoryServer.java

Example 5: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) {
	try {
		// startup checks and logging
		EnvironmentInformation.logEnvironmentInfo(LOG, "ZooKeeper Quorum Peer", args);
		
		final ParameterTool params = ParameterTool.fromArgs(args);
		final String zkConfigFile = params.getRequired("zkConfigFile");
		final int peerId = params.getInt("peerId");

		// Run quorum peer
		runFlinkZkQuorumPeer(zkConfigFile, peerId);
	}
	catch (Throwable t) {
		LOG.error("Error running ZooKeeper quorum peer: " + t.getMessage(), t);
		System.exit(-1);
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 18, Source: FlinkZooKeeperQuorumPeer.java

Example 6: parseConfigurations

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
private void parseConfigurations(String[] args) {

		LOG.info("ApplicationMain Main.. Arguments: {}", Arrays.asList(args));

		ParameterTool parameterTool = ParameterTool.fromArgs(args);
		LOG.info("Parameter Tool: {}", parameterTool.toMap());

		if(parameterTool.getNumberOfParameters() != 2) {
			printUsage();
			System.exit(1);
		}

		String configDirPath = parameterTool.getRequired("configDir");
		try {
			byte[] configurationData = Files.readAllBytes(Paths.get(configDirPath + File.separator + configFile));
			String jsonData = new String(configurationData);
			LOG.info("App Configurations raw data: {}", jsonData);
			Gson gson = new Gson();
			appConfiguration = gson.fromJson(jsonData, AppConfiguration.class);
		} catch (IOException e) {
			LOG.error("Could not read {}",configFile, e);
			System.exit(1);
		}

		runMode = parameterTool.getInt("mode");

		pravega = new FlinkPravegaParams(ParameterTool.fromArgs(args));
	}
 
Developer: pravega, Project: pravega-samples, Lines of code: 29, Source: PipelineRunner.java

Example 7: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

    // Parse command line parameters
    ParameterTool parameterTool = ParameterTool.fromArgs(args);
    String host = parameterTool.getRequired("host");
    int port = Integer.valueOf(parameterTool.getRequired("port"));

    // Setup the execution environment
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(1000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
    env.setParallelism(1);
    
    // Stream of updates to subscriptions, partitioned by tweetId, read from socket
    DataStream<TweetSubscription> filterUpdateStream = env.socketTextStream(host, port)
      .map(stringToTweetSubscription())
      .keyBy(TweetSubscription.getKeySelector());

    // TweetImpression stream, partitioned by tweetId
    DataStream<TweetImpression> tweetStream = env.addSource(new TweetSourceFunction(false), "TweetImpression Source")
      .keyBy(TweetImpression.getKeySelector());

    // Run the tweet impressions past the filters and emit those that customers have requested
    DataStream<CustomerImpression> filteredStream = tweetStream
      .connect(filterUpdateStream)
      .flatMap(new TweetSubscriptionFilterFunction());

    // Create a separate sink for each customer
    DataStreamSink<CustomerImpression>[] customerSinks = setupCustomerSinks(filteredStream);

    // Run it
    env.execute();
  }
 
Developer: jgrier, Project: FilteringExample, Lines of code: 34, Source: TweetImpressionFilteringJob.java

Example 8: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // get CLI parameters
        ParameterTool parameters = ParameterTool.fromArgs(args);
        String topic = parameters.getRequired("topic");
        String groupId = parameters.get("group-id", "flink-kafka-consumer");
        String propertiesFile = parameters.getRequired("env");
        ParameterTool envProperties = ParameterTool.fromPropertiesFile(propertiesFile);
        String schemaRegistryUrl = envProperties.getRequired("registry_url");
        String bootstrapServers = envProperties.getRequired("brokers");
        String zookeeperConnect = envProperties.getRequired("zookeeper");

        // set up the Kafka consumer
        ConfluentAvroDeserializationSchema deserSchema = new ConfluentAvroDeserializationSchema(schemaRegistryUrl);
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", bootstrapServers);
        kafkaProps.setProperty("zookeeper.connect", zookeeperConnect);
        kafkaProps.setProperty("group.id", groupId);
        FlinkKafkaConsumer08<String> flinkKafkaConsumer = new FlinkKafkaConsumer08<String>(topic, deserSchema, kafkaProps);

        DataStream<String> kafkaStream = env.addSource(flinkKafkaConsumer);

        DataStream<Integer> counts = kafkaStream
                .map(new MapFunction<String, Integer>() {
                    public Integer map(String s) throws Exception {
                        return 1;
                    }
                })
                .timeWindowAll(Time.seconds(3))
                .sum(0);

        counts.print();

        env.execute("Flink Kafka Java Example");
    }
 
Developer: seanpquig, Project: flink-streaming-confluent, Lines of code: 38, Source: FlinkKafkaExample.java
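
Example 8 is notable for layering two ParameterTool instances: command-line flags via fromArgs plus an environment file via fromPropertiesFile. The latter reads a standard Java properties file; a minimal sketch of what the file passed as --env might contain (all values are placeholders):

# env.properties -- placeholder values
registry_url=http://localhost:8081
brokers=localhost:9092
zookeeper=localhost:2181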

Example 9: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

		// read parameters
		ParameterTool params = ParameterTool.fromArgs(args);
		String nycTaxiRidesPath = params.getRequired("nycTaxiRidesPath");

		// set up streaming execution environment
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// ===============================================================================
		//   1. remember to set this job to use "Event Time"
		// ===============================================================================

		// start the data generator
		DataStream<TaxiRide> rides = env.addSource(
				new TaxiRideSource(nycTaxiRidesPath, MAX_EVENT_DELAY, SERVING_SPEED_FACTOR));

		// ===============================================================================
		//   2. again, filter ride events to contain only valid geo coordinates
		//   3. map each ride event to a tuple 2 pair: (grid cell id, the event)
		//   4. partition the stream by the grid cell id
		//   5. aggregate the number of ride events (start and end) in each grid cell
		//      over a sliding window (span 15 minutes, slide 1 minute), and output:
		//      (cellId, time, eventCount)
		//   6. filter out window outputs if the number of ride events is
		//      lower than POPULAR_THRESHOLD.
		//   7. map the grid cell back to geo coordinates, and print as format:
		//      (lon, lat, time, eventCount)
		// ===============================================================================

		// execute the transformation pipeline
		env.execute("Popular Places");
	}
 
Developer: flink-taiwan, Project: jcconf2016-workshop, Lines of code: 34, Source: TaxiRidePopularPlaces.java
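
The "Event Time" setting that exercise step 1 in Example 9 asks for is the same single call that Examples 10 and 12 make explicitly:

// run the job on event time (as in Examples 10 and 12)
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);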

Example 10: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

		ParameterTool params = ParameterTool.fromArgs(args);
		final String nycTaxiRidesPath = params.getRequired("nycTaxiRidesPath");

		final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

		// set up streaming execution environment
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		// operate in Event-time
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		// ===============================================================================
		//   1. we want to persist our incrementally trained model, even on failure,
		//      so you must enable checkpointing! (set to a 5 second interval)
		// ===============================================================================

		// start the data generator
		DataStream<TaxiRide> rides = env.addSource(
				new CheckpointedTaxiRideSource(nycTaxiRidesPath, servingSpeedFactor));

		DataStream<Tuple2<Long, Integer>> predictions = rides
				// filter out rides that do not start or stop in NYC
				.filter(new NYCFilter())
				// map taxi ride events to the grid cell of the destination
				.map(new DestinationGridCellMatcher())
				// organize stream by destination
				.keyBy(0)
				// ===============================================================================
				//   2. the PredictionModel flatMap function is not implemented yet;
				//      finish its implementation down below!
				// ===============================================================================
				.flatMap(new PredictionModel());

		// print the predictions
		predictions.print();

		// run the prediction pipeline
		env.execute("Taxi Ride Prediction");
	}
 
Developer: flink-taiwan, Project: jcconf2016-workshop, Lines of code: 41, Source: TaxiRideTravelTimePrediction.java
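
The checkpointing that exercise step 1 in Example 10 calls for is one call on the environment; a sketch with the 5-second interval the comment specifies:

// checkpoint every 5 seconds so the incrementally trained model survives failures
env.enableCheckpointing(5000);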

Example 11: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

		ParameterTool params = ParameterTool.fromArgs(args);
		final String nycTaxiRidesPath = params.getRequired("nycTaxiRidesPath");

		final int maxEventDelay = 60;       // events are out of order by max 60 seconds
		final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

		// set up streaming execution environment
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// start the data generator
		DataStream<TaxiRide> rides = env.addSource(
				new TaxiRideSource(nycTaxiRidesPath, maxEventDelay, servingSpeedFactor));

		// ===============================================================================
		//   1. clean up `rides`, so that the output stream only contains events
		//      with valid geo coordinates within NYC.
		//   2. print out the result stream to console
		// ===============================================================================
		DataStream<TaxiRide> filteredRides = rides
				// filter out rides that do not start or stop in NYC
				.filter(new NYCFilter());

		// print the filtered stream
		filteredRides.print();

		// run the cleansing pipeline
		env.execute("Taxi Ride Cleansing");
	}
 
Developer: flink-taiwan, Project: jcconf2016-workshop, Lines of code: 31, Source: TaxiRideCleansingAnswer.java

Example 12: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

		// read parameters
		ParameterTool params = ParameterTool.fromArgs(args);
		String nycTaxiRidesPath = params.getRequired("nycTaxiRidesPath");

		final int popThreshold = 20;        // threshold for popular places
		final int maxEventDelay = 60;       // events are out of order by max 60 seconds
		final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

		// set up streaming execution environment
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// ===============================================================================
		//   1. remember to set this job to use "Event Time"
		// ===============================================================================
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		// start the data generator
		DataStream<TaxiRide> rides = env.addSource(
				new TaxiRideSource(nycTaxiRidesPath, maxEventDelay, servingSpeedFactor));

		// ===============================================================================
		//   2. again, filter ride events to contain only valid geo coordinates
		//   3. map each ride event to a tuple 2 pair: (grid cell id, the event)
		//   4. partition the stream by the grid cell id
		//   5. aggregate the number of ride events (start and end) in each grid cell
		//      over a sliding window (span 15 minutes, slide 1 minute), and output:
		//      (cellId, time, eventCount)
		//   6. filter out window outputs if the number of ride events is
		//      lower than POPULAR_THRESHOLD.
		//   7. map the grid cell back to geo coordinates, and print as format:
		//      (lon, lat, time, eventCount)
		// ===============================================================================
		DataStream<Tuple4<Float, Float, Long, Integer>> popularSpots = rides
				// 2. again, filter ride events to contain only valid geo coordinates
				.filter(new NYCFilter())
				// 3. map each ride event to a tuple 2 pair: (grid cell id, the event)
				.map(new GridCellMatcher())
				// 4. partition the stream by the grid cell id
				.keyBy(0)
				// 5. aggregate the number of ride events (start and end) in each grid cell
				//	  over a sliding window (span 15 minutes, slide 1 minute), and output: (cellId, time, eventCount)
				.timeWindow(Time.minutes(15), Time.minutes(5))
				.apply(new RideCounter())
				// 6. filter out window outputs if the number of ride events is
				// 	  lower than POPULAR_THRESHOLD.
				.filter(new FilterFunction<Tuple3<Integer, Long, Integer>>() {
					@Override
					public boolean filter(Tuple3<Integer, Long, Integer> count) throws Exception {
						return count.f2 >= popThreshold;
					}
				})
				// 7. map the grid cell back to geo coordinates, and print as format: (lon, lat, time, eventCount)
				.map(new GridToCoordinates());

		// print result on stdout
		popularSpots.print();

		// execute the transformation pipeline
		env.execute("Popular Places");
	}
 
Developer: flink-taiwan, Project: jcconf2016-workshop, Lines of code: 63, Source: TaxiRidePopularPlacesAnswer.java

Example 13: SummarizationJobParameters

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public SummarizationJobParameters(ParameterTool params) {
    timelyHostname = params.getRequired("timelyHostname");
    timelyTcpPort = params.getInt("timelyTcpPort", 4241);
    timelyHttpsPort = params.getInt("timelyHttpsPort", 4242);
    timelyWssPort = params.getInt("timelyWssPort", 4243);
    doLogin = params.getBoolean("doLogin", false);
    timelyUsername = params.get("timelyUsername", null);
    timelyPassword = params.get("timelyPassword", null);
    keyStoreFile = params.getRequired("keyStoreFile");
    keyStoreType = params.get("keyStoreType", "JKS");
    keyStorePass = params.getRequired("keyStorePass");
    trustStoreFile = params.getRequired("trustStoreFile");
    trustStoreType = params.get("trustStoreType", "JKS");
    trustStorePass = params.getRequired("trustStorePass");
    hostVerificationEnabled = params.getBoolean("hostVerificationEnabled", true);
    bufferSize = params.getInt("bufferSize", 10485760);
    // getRequired throws if "metrics" is absent, so the value can never be null here
    String metricNames = params.getRequired("metrics");
    metrics = metricNames.split(",");
    startTime = params.getLong("startTime", 0L);
    endTime = params.getLong("endTime", 0L);
    interval = params.getRequired("interval");
    intervalUnits = params.getRequired("intervalUnits");
}
 
Developer: NationalSecurityAgency, Project: timely, Lines of code: 28, Source: SummarizationJobParameters.java

Example 14: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {
    ParameterTool tool = ParameterTool.fromArgs(args);

    String topic = tool.getRequired("kafka.topic");

    Properties kafkaConsumerProps = new Properties();
    kafkaConsumerProps.setProperty("bootstrap.servers", tool.getRequired("kafkabroker"));
    kafkaConsumerProps.setProperty("group.id", tool.getRequired("kafka.groupId"));
    kafkaConsumerProps.setProperty("zookeeper.connect", tool.get("zookeeper.host", "localhost:2181"));
    kafkaConsumerProps.setProperty("auto.offset.reset", tool.getBoolean("from-beginning", false) ? "smallest" : "largest");

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

    DataStream<String> textStream = env
            .addSource(new FlinkKafkaConsumer08<>(topic, new SimpleStringSchema(), kafkaConsumerProps));

    SlidingEventTimeWindows window = SlidingEventTimeWindows.of(Time.minutes(1), Time.seconds(30));

    textStream.flatMap(new LineSplitter())
        .keyBy(0)
        .sum(1)
        .windowAll(window)
        .maxBy(1)
        .writeAsText("file:///Users/abij/projects/tryouts/flink-streaming/flink-streaming-results.log", OVERWRITE);

    env.execute("SlidingWindow WordCount");
}
 
Developer: godatadriven, Project: flink-streaming-xke, Lines of code: 29, Source: SlidingWindowingWordCount.java

Example 15: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {
    // Read parameters from command line
    final ParameterTool params = ParameterTool.fromArgs(args);

    if(params.getNumberOfParameters() < 4) {
        System.out.println("\nUsage: FlinkReadKafka " +
                           "--read-topic <topic> " +
                           "--write-topic <topic> " +
                           "--bootstrap.servers <kafka brokers> " +
                           "--group.id <groupid>");
        return;
    }

    // define a schema
    String[] fieldNames = { "flight", "timestamp_verbose", "msg_type", "track",
            "timestamp", "altitude", "counter", "lon",
            "icao", "vr", "lat", "speed" };
    TypeInformation<?>[] dataTypes = { Types.INT, Types.STRING, Types.STRING, Types.STRING,
            Types.SQL_TIMESTAMP, Types.STRING, Types.STRING, Types.STRING,
            Types.STRING, Types.STRING, Types.STRING, Types.STRING };

    TypeInformation<Row> dataRow = Types.ROW_NAMED(fieldNames, dataTypes);

    // setup streaming environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
    env.enableCheckpointing(300000); // 300 seconds
    env.getConfig().setGlobalJobParameters(params);

    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    KafkaTableSource kafkaTableSource = Kafka010JsonTableSource.builder()
            .forTopic(params.getRequired("read-topic"))
            .withKafkaProperties(params.getProperties())
            .withSchema(TableSchema.fromTypeInfo(dataRow))
            .forJsonSchema(TableSchema.fromTypeInfo(dataRow))
            .build();

    String sql = "SELECT timestamp_verbose, icao, lat, lon, altitude " +
                 "FROM flights " +
                 "WHERE altitude <> '' ";
    tableEnv.registerTableSource("flights", kafkaTableSource);
    Table result = tableEnv.sqlQuery(sql);


    // create a partition for the data going into kafka
    FlinkFixedPartitioner partition =  new FlinkFixedPartitioner();

    // create new tablesink of JSON to kafka
    KafkaJsonTableSink kafkaTableSink = new Kafka09JsonTableSink(
            params.getRequired("write-topic"),
            params.getProperties(),
            partition);

    result.writeToSink(kafkaTableSink);

    env.execute("FlinkReadWriteKafkaJSON");
}
 
Developer: kgorman, Project: TrafficAnalyzer, Lines of code: 59, Source: FlinkReadWriteKafkaJSON.java


Note: The org.apache.flink.api.java.utils.ParameterTool.getRequired examples in this article were compiled from open-source projects hosted on GitHub, MSDocs, and similar platforms. Copyright of each snippet remains with its original author; consult the corresponding project's license before redistributing or reusing the code. Do not reproduce without permission.