

Java ParameterTool.has Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.api.java.utils.ParameterTool.has. If you are wondering what ParameterTool.has does and how it is used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.api.java.utils.ParameterTool.


The following presents 15 code examples of the ParameterTool.has method, sorted by popularity by default.
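
Before working through the examples, here is a minimal standalone sketch of the common pattern they all share: ParameterTool.fromArgs parses command-line arguments of the form --key value, and has(key) reports whether a given option was supplied, so the program can branch between user input and a built-in default. The --input flag and the messages below are illustrative placeholders, not taken from any particular project.

import org.apache.flink.api.java.utils.ParameterTool;

public class HasExample {
	public static void main(String[] args) {
		// parse arguments of the form --key value
		ParameterTool params = ParameterTool.fromArgs(args);

		// has(key) returns true only if the option was supplied on the command line
		if (params.has("input")) {
			System.out.println("Reading from " + params.get("input"));
		} else {
			System.out.println("No --input given; falling back to built-in defaults.");
		}
	}
}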

Example 1: parseParams

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public boolean parseParams(String[] args) throws Exception {
	boolean wasHelpPrinted = false;
	ParameterTool parameter = ParameterTool.fromArgs(args);

	if (parameter.has("help")) {
		// --help short-circuits normal parsing
		printHelpMessage();
		wasHelpPrinted = true;
	} else {
		try {
			// --input is mandatory unless help was requested
			dataFilePath = parameter.getRequired("input");
		} catch (Exception e) {
			printHelpMessage();
			throw e;
		}
	}

	return wasHelpPrinted;
}
 
Author: dineshtrivedi, Project: flink-java-project, Lines of code: 22, Source file: TaxiRideCleansingParameterParser.java

Example 2: configure

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
@Override
public void configure(ParameterTool parameterTool) {
	if (hasDefaultValue && !parameterTool.has(name)) {
		// skip checks for min and max when using default value
		value = defaultValue;
	} else {
		value = parameterTool.getLong(name);

		if (hasMinimumValue) {
			Util.checkParameter(value >= minimumValue,
				name + " must be greater than or equal to " + minimumValue);
		}

		if (hasMaximumValue) {
			Util.checkParameter(value <= maximumValue,
				name + " must be less than or equal to " + maximumValue);
		}
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 20, Source file: LongParameter.java
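
The example above validates the bounds only when the user actually set the option: guarding with has() keeps the default value exempt from range checks. Below is a minimal standalone sketch of the same pattern using only the core ParameterTool API; the window_size name and the bound are illustrative assumptions, and a plain IllegalArgumentException stands in for the Flink-internal Util.checkParameter helper.

import org.apache.flink.api.java.utils.ParameterTool;

public class LongParamDemo {
	public static void main(String[] args) {
		ParameterTool params = ParameterTool.fromArgs(args);

		long windowSize;
		if (!params.has("window_size")) {
			// option absent: use the default and skip validation
			windowSize = 100L;
		} else {
			// option present: getLong(name) throws if the value is missing or unparsable
			windowSize = params.getLong("window_size");
			if (windowSize < 1L) {
				throw new IllegalArgumentException("window_size must be greater than or equal to 1");
			}
		}
		System.out.println("window_size = " + windowSize);
	}
}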

Example 3: configure

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
@Override
public void configure(ParameterTool parameterTool) {
	if (!parameterTool.has("iterations") && !parameterTool.has("convergence_threshold")) {
		// no configuration so use default iterations and maximum threshold
		value.iterations = defaultIterations;
		value.convergenceThreshold = Double.MAX_VALUE;
	} else {
		// use configured values and maximum default for unset values
		value.iterations = parameterTool.getInt("iterations", Integer.MAX_VALUE);
		Util.checkParameter(value.iterations > 0,
			"iterations must be greater than zero");

		value.convergenceThreshold = parameterTool.getDouble("convergence_threshold", Double.MAX_VALUE);
		Util.checkParameter(value.convergenceThreshold > 0,
			"convergence threshold must be greater than zero");
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 18, Source file: IterationConvergence.java

Example 4: configure

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
@Override
public void configure(ParameterTool parameterTool) {
	String ordering = parameterTool.get("simplify");

	if (ordering == null) {
		value = Ordering.NONE;
	} else {
		switch (ordering.toLowerCase()) {
			case "directed":
				value = Ordering.DIRECTED;
				break;
			case "undirected":
				value = parameterTool.has("clip_and_flip") ? Ordering.UNDIRECTED_CLIP_AND_FLIP : Ordering.UNDIRECTED;
				break;
			default:
				throw new ProgramParametrizationException(
					"Expected 'directed' or 'undirected' ordering but received '" + ordering + "'");
		}
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 21, Source file: Simplify.java
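
Note that has() also covers value-less switches: ParameterTool.fromArgs records a bare --clip_and_flip with no value, so has("clip_and_flip") returns true even though no value was given. A quick sketch, with illustrative argument values:

import org.apache.flink.api.java.utils.ParameterTool;

public class FlagDemo {
	public static void main(String[] args) {
		// a bare flag without a value is still recorded by fromArgs
		ParameterTool params = ParameterTool.fromArgs(
				new String[]{"--simplify", "undirected", "--clip_and_flip"});

		System.out.println(params.has("clip_and_flip")); // true
		System.out.println(params.has("directed"));      // false
	}
}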

Example 5: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {
    ParameterTool params = ParameterTool.fromArgs(args);
    FlinkPravegaParams helper = new FlinkPravegaParams(params);
    StreamId stream = helper.createStreamFromParam("input", "examples/turbineHeatTest");

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // 1. read and decode the sensor events from a Pravega stream
    long startTime = params.getLong("start", 0L);
    FlinkPravegaReader<String> reader = helper.newReader(stream, startTime, String.class);
    DataStream<SensorEvent> events = env.addSource(reader, "input").map(new SensorMapper()).name("events");

    // 2. extract timestamp information to support 'event-time' processing
    SingleOutputStreamOperator<SensorEvent> timestamped = events.assignTimestampsAndWatermarks(
            new BoundedOutOfOrdernessTimestampExtractor<SensorEvent>(Time.seconds(10)) {
        @Override
        public long extractTimestamp(SensorEvent element) {
            return element.getTimestamp();
        }
    });
    timestamped.print();

    // 3. summarize the temperature data for each sensor
    SingleOutputStreamOperator<SensorAggregate> summaries = timestamped
            .keyBy("sensorId")
            .window(TumblingEventTimeWindows.of(Time.days(1), Time.hours(8)))
            .fold(null, new SensorAggregator()).name("summaries");

    // 4. save to HDFS and print to stdout.  Refer to the TaskManager's 'Stdout' view in the Flink UI.
    summaries.print().name("stdout");
    if (params.has("output")) {
        summaries.writeAsCsv(params.getRequired("output"), FileSystem.WriteMode.OVERWRITE);
    }

    env.execute("TurbineHeatProcessor_" + stream);
}
 
Author: pravega, Project: pravega-samples, Lines of code: 38, Source file: TurbineHeatProcessor.java

Example 6: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

		// Checking input parameters
		final ParameterTool params = ParameterTool.fromArgs(args);

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// make parameters available in the web interface
		env.getConfig().setGlobalJobParameters(params);

		// get input data
		DataStream<String> text;
		if (params.has("input")) {
			// read the text file from given input path
			text = env.readTextFile(params.get("input"));
		} else {
			System.out.println("Executing WordCount example with default input data set.");
			System.out.println("Use --input to specify file input.");
			// get default test text data
			text = env.fromElements(WordCountData.WORDS);
		}

		DataStream<Tuple2<String, Integer>> counts =
				// split up the lines in pairs (2-tuples) containing: (word,1)
				text.flatMap(new Tokenizer())
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0)
				.sum(1);

		// emit result
		if (params.has("output")) {
			counts.writeAsText(params.get("output"));
		} else {
			System.out.println("Printing result to stdout. Use --output to specify output path.");
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount");
	}
 
Author: axbaretto, Project: flink, Lines of code: 41, Source file: WordCount.java

Example 7: getLinksDataSet

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
private static DataSet<Tuple2<Long, Long>> getLinksDataSet(ExecutionEnvironment env, ParameterTool params) {
	if (params.has("links")) {
		return env.readCsvFile(params.get("links"))
			.fieldDelimiter(" ")
			.lineDelimiter("\n")
			.types(Long.class, Long.class);
	} else {
		System.out.println("Executing PageRank example with default links data set.");
		System.out.println("Use --links to specify file input.");
		return PageRankData.getDefaultEdgeDataSet(env);
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 13, Source file: PageRank.java

Example 8: getDocumentsDataSet

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
private static DataSet<Tuple2<String, String>> getDocumentsDataSet(ExecutionEnvironment env, ParameterTool params) {
	// Create DataSet for documents relation (URL, Doc-Text)
	if (params.has("documents")) {
		return env.readCsvFile(params.get("documents"))
					.fieldDelimiter("|")
					.types(String.class, String.class);
	} else {
		System.out.println("Executing WebLogAnalysis example with default documents data set.");
		System.out.println("Use --documents to specify file input.");
		return WebLogData.getDocumentDataSet(env);
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 13, Source file: WebLogAnalysis.java

Example 9: getRanksDataSet

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
private static DataSet<Tuple3<Integer, String, Integer>> getRanksDataSet(ExecutionEnvironment env, ParameterTool params) {
	// Create DataSet for ranks relation (Rank, URL, Avg-Visit-Duration)
	if (params.has("ranks")) {
		return env.readCsvFile(params.get("ranks"))
					.fieldDelimiter("|")
					.types(Integer.class, String.class, Integer.class);
	} else {
		System.out.println("Executing WebLogAnalysis example with default ranks data set.");
		System.out.println("Use --ranks to specify file input.");
		return WebLogData.getRankDataSet(env);
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 13, Source file: WebLogAnalysis.java

Example 10: getVisitsDataSet

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
private static DataSet<Tuple2<String, String>> getVisitsDataSet(ExecutionEnvironment env, ParameterTool params) {
	// Create DataSet for visits relation (URL, Date)
	if (params.has("visits")) {
		return env.readCsvFile(params.get("visits"))
					.fieldDelimiter("|")
					.includeFields("011000000")
					.types(String.class, String.class);
	} else {
		System.out.println("Executing WebLogAnalysis example with default visits data set.");
		System.out.println("Use --visits to specify file input.");
		return WebLogData.getVisitDataSet(env);
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 14, Source file: WebLogAnalysis.java

Example 11: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

		// Checking input parameters
		final ParameterTool params = ParameterTool.fromArgs(args);

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		DataStream<Integer> trainingData = env.addSource(new FiniteTrainingDataSource());
		DataStream<Integer> newData = env.addSource(new FiniteNewDataSource());

		// build new model on every second of new data
		DataStream<Double[]> model = trainingData
				.assignTimestampsAndWatermarks(new LinearTimestamp())
				.timeWindowAll(Time.of(5000, TimeUnit.MILLISECONDS))
				.apply(new PartialModelBuilder());

		// use partial model for newData
		DataStream<Integer> prediction = newData.connect(model).map(new Predictor());

		// emit result
		if (params.has("output")) {
			prediction.writeAsText(params.get("output"));
		} else {
			System.out.println("Printing result to stdout. Use --output to specify output path.");
			prediction.print();
		}

		// execute program
		env.execute("Streaming Incremental Learning");
	}
 
Author: axbaretto, Project: flink, Lines of code: 32, Source file: IncrementalLearningSkeleton.java

Example 12: getDataSet

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
@SuppressWarnings("unchecked")
private static DataSet<StringTriple> getDataSet(ExecutionEnvironment env, ParameterTool params) {
	if (params.has("input")) {
		return env.readCsvFile(params.get("input"))
			.fieldDelimiter(";")
			.pojoType(StringTriple.class);
	} else {
		System.out.println("Executing EmptyFieldsCountAccumulator example with default input data set.");
		System.out.println("Use --input to specify file input.");
		return env.fromCollection(getExampleInputTuples());
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 13, Source file: EmptyFieldsCountAccumulator.java

Example 13: getCentroidDataSet

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
private static DataSet<Centroid> getCentroidDataSet(ParameterTool params, ExecutionEnvironment env) {
	DataSet<Centroid> centroids;
	if (params.has("centroids")) {
		centroids = env.readCsvFile(params.get("centroids"))
			.fieldDelimiter(" ")
			.pojoType(Centroid.class, "id", "x", "y");
	} else {
		System.out.println("Executing K-Means example with default centroid data set.");
		System.out.println("Use --centroids to specify file input.");
		centroids = KMeansData.getDefaultCentroidDataSet(env);
	}
	return centroids;
}
 
Author: axbaretto, Project: flink, Lines of code: 14, Source file: KMeans.java

Example 14: main

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
public static void main(String[] args) throws Exception {

		final ParameterTool params = ParameterTool.fromArgs(args);

		// set up the execution environment
		final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		// make parameters available in the web interface
		env.getConfig().setGlobalJobParameters(params);

		// get input data
		DataSet<String> text;
		if (params.has("input")) {
			// read the text file from given input path
			text = env.readTextFile(params.get("input"));
		} else {
			// get default test text data
			System.out.println("Executing WordCount example with default input data set.");
			System.out.println("Use --input to specify file input.");
			text = WordCountData.getDefaultTextLineDataSet(env);
		}

		DataSet<Tuple2<String, Integer>> counts =
				// split up the lines in pairs (2-tuples) containing: (word,1)
				text.flatMap(new Tokenizer())
				// group by the tuple field "0" and sum up tuple field "1"
				.groupBy(0)
				.sum(1);

		// emit result
		if (params.has("output")) {
			counts.writeAsCsv(params.get("output"), "\n", " ");
			// execute program
			env.execute("WordCount Example");
		} else {
			System.out.println("Printing result to stdout. Use --output to specify output path.");
			counts.print();
		}

	}
 
Author: axbaretto, Project: flink, Lines of code: 41, Source file: WordCount.java

Example 15: getVertexDataSet

import org.apache.flink.api.java.utils.ParameterTool; // import the class this method depends on
private static DataSet<Long> getVertexDataSet(ExecutionEnvironment env, ParameterTool params) {
	if (params.has("vertices")) {
		return env.readCsvFile(params.get("vertices")).types(Long.class).map(
			new MapFunction<Tuple1<Long>, Long>() {
				public Long map(Tuple1<Long> value) {
					return value.f0;
				}
			});
	} else {
		System.out.println("Executing Connected Components example with default vertices data set.");
		System.out.println("Use --vertices to specify file input.");
		return ConnectedComponentsData.getDefaultVertexDataSet(env);
	}
}
 
Author: axbaretto, Project: flink, Lines of code: 15, Source file: ConnectedComponents.java


Note: The org.apache.flink.api.java.utils.ParameterTool.has examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. Copyright in each snippet remains with its original author; consult the corresponding project's license before using or redistributing the code. Do not republish without permission.