This article collects typical usage examples of the Java method org.apache.hadoop.util.ProgramDriver.run. If you are wondering what ProgramDriver.run does, how to call it, or where to find concrete examples, the curated method samples below should help. You can also explore the enclosing class, org.apache.hadoop.util.ProgramDriver, for further usage.
The following presents 15 code examples of ProgramDriver.run, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
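All fifteen examples share one skeleton: register each job class under a command name with addClass, hand the raw argv to run, and exit the process with its return code. The minimal sketch below distills that pattern; the Driver and Hello classes are illustrative placeholders, not taken from any of the projects quoted later.

import org.apache.hadoop.util.ProgramDriver;

public class Driver {
  // Placeholder program. ProgramDriver looks up the class's public static
  // void main(String[]) reflectively and invokes it when the first argument
  // matches the registered command name.
  public static class Hello {
    public static void main(String[] args) {
      System.out.println("hello from ProgramDriver");
    }
  }

  public static void main(String[] argv) {
    int exitCode = -1;
    ProgramDriver pgd = new ProgramDriver();
    try {
      // addClass maps a command name to a class plus a one-line description
      // that run() prints when invoked with no or unknown arguments.
      pgd.addClass("hello", Hello.class, "Prints a greeting.");
      // run() strips the command name, dispatches to the matching class with
      // the remaining arguments, and returns 0 on success or -1 otherwise.
      exitCode = pgd.run(argv);
    } catch (Throwable e) {
      e.printStackTrace();
    }
    System.exit(exitCode);
  }
}

Initializing exitCode to -1 and ending with System.exit(exitCode) is the convention every example below follows: any exception or unrecognized command surfaces as a non-zero process exit status.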
Example 1: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("wordcount", WordCount.class,
        "A map/reduce program that counts the words in the input files.");
    pgd.addClass("xflowstatic", XflowStatic.class,
        "A map/reduce program that computes xflow statistics from data files.");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 2: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String[] args) {
  ProgramDriver programDriver = new ProgramDriver();
  int exitCode = -1;
  try {
    programDriver.addClass("wordcount-hbase", WordCountHBase.class,
        "A map/reduce program that counts the words in the input files.");
    programDriver.addClass("export-table", Export.class,
        "A map/reduce program that exports a table to a file.");
    //programDriver.addClass("cellcounter", CellCounter.class, "Count them cells!");
    // run() both dispatches the selected program and returns its exit code,
    // so a preceding call to the deprecated driver(args) is unnecessary and
    // would run the selected program a second time.
    exitCode = programDriver.run(args);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 3: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("WikiTitleExtract", WikiTitleExtract.class,
        "A map/reduce program that extracts the contents between <title></title> in XML files");
    pgd.addClass("WikiWordCount", WikiWordCount.class,
        "A map/reduce program that counts the word frequency in the files");
    pgd.addClass("BadDocCount", BadDocCount.class,
        "A map/reduce program that counts the documents which do not contain <title></title>");
    pgd.addClass("InvertedIndex", InvertedIndex.class,
        "A map/reduce program that calculates the inverted index");
    pgd.addClass("IDOffsetTitle", IDOffsetTitle.class,
        "A map/reduce program that extracts the <ID> <Offset> <title> triple");
    pgd.addClass("RedirectCount", RedirectCount.class,
        "A map/reduce program that counts the number of redirect pages");
    pgd.addClass("PageWordCount", PageWordCount.class,
        "A map/reduce program that counts the words of each page");
    pgd.addClass("PageMaxWordCount", PageMaxWordCount.class,
        "A map/reduce program that finds the most frequent occurrence of a word in each page");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 4: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String[] args) {
  ProgramDriver programDriver = new ProgramDriver();
  int exitCode = -1;
  try {
    programDriver.addClass("export-table", Export.class,
        "A map/reduce program that exports a table to a file.");
    programDriver.addClass("import-table", Import.class,
        "A map/reduce program that imports a table from a file.");
    // As in Example 2, run() alone both dispatches the selected program and
    // returns its exit code.
    exitCode = programDriver.run(args);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 5: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String args[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("MNISTConverter", MNISTConverter.class,
        "A utility program that converts MNIST training and label datasets "
            + "into an HDFS sequence file.");
    pgd.addClass("MNISTEvaluator", MNISTEvaluator.class,
        "A utility program that evaluates a trained model for the MNIST dataset");
    pgd.addClass("MultiLayerPerceptron", MultiLayerPerceptron.class,
        "An example program that trains a multilayer perceptron model from an HDFS sequence file.");
    pgd.addClass("ExclusiveOrConverter", ExclusiveOrConverter.class,
        "A utility program that converts ExclusiveOR training and label datasets");
    pgd.addClass("ExclusiveOrRecurrentMultiLayerPerceptron",
        ExclusiveOrRecurrentMultiLayerPerceptron.class,
        "An example program that trains a recurrent multilayer perceptron model with exclusive or"
            + " from an HDFS sequence file.");
    pgd.addClass("MnistRecurrentMultiLayerPerceptron",
        MnistRecurrentMultiLayerPerceptron.class,
        "An example program that trains a recurrent multilayer perceptron model with MNIST"
            + " from an HDFS sequence file.");
    exitCode = pgd.run(args);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 6: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("topk", TopK.class, "topk");
    pgd.addClass("topkgen", TopKDataGen.class, "topkgen");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 7: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("FaultToleranceTestRunner", FaultToleranceTestRunner.class,
        "Run different DAGs for fault tolerance testing");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 8: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("wordcount", WordCount.class,
        "A native Tez wordcount program that counts the words in the input files.");
    pgd.addClass("orderedwordcount", OrderedWordCount.class,
        "Word Count with words sorted on frequency");
    pgd.addClass("simplesessionexample", SimpleSessionExample.class,
        "Example to run multiple dags in a session");
    pgd.addClass("hashjoin", HashJoinExample.class,
        "Identify all occurrences of lines in file1 which also occur in file2 using hash join");
    pgd.addClass("sortmergejoin", SortMergeJoinExample.class,
        "Identify all occurrences of lines in file1 which also occur in file2 using sort merge join");
    pgd.addClass("joindatagen", JoinDataGen.class,
        "Generate data to run the joinexample");
    pgd.addClass("joinvalidate", JoinValidate.class,
        "Validate data generated by joinexample and joindatagen");
    pgd.addClass("cartesianproduct", CartesianProduct.class,
        "Cartesian product of two datasets");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 9: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("CriticalPath", CriticalPathAnalyzer.class,
        "Find the critical path of a DAG");
    pgd.addClass("ContainerReuseAnalyzer", ContainerReuseAnalyzer.class,
        "Print container reuse details in a DAG");
    pgd.addClass("LocalityAnalyzer", LocalityAnalyzer.class,
        "Print locality details in a DAG");
    pgd.addClass("ShuffleTimeAnalyzer", ShuffleTimeAnalyzer.class,
        "Analyze the shuffle time details in a DAG");
    pgd.addClass("SkewAnalyzer", SkewAnalyzer.class,
        "Analyze the skew details in a DAG");
    pgd.addClass("SlowestVertexAnalyzer", SlowestVertexAnalyzer.class,
        "Print slowest vertex details in a DAG");
    pgd.addClass("SlowNodeAnalyzer", SlowNodeAnalyzer.class,
        "Print node details in a DAG");
    pgd.addClass("SlowTaskIdentifier", SlowTaskIdentifier.class,
        "Print slow task details in a DAG");
    pgd.addClass("SpillAnalyzer", SpillAnalyzerImpl.class,
        "Print spill details in a DAG");
    pgd.addClass("TaskAssignmentAnalyzer", TaskAssignmentAnalyzer.class,
        "Print task-to-node assignment details of a DAG");
    pgd.addClass("TaskConcurrencyAnalyzer", TaskConcurrencyAnalyzer.class,
        "Print the task concurrency details in a DAG");
    pgd.addClass("VertexLevelCriticalPathAnalyzer", VertexLevelCriticalPathAnalyzer.class,
        "Find critical path at vertex level in a DAG");
    pgd.addClass("OneOnOneEdgeAnalyzer", OneOnOneEdgeAnalyzer.class,
        "Find out schedule misses in 1:1 edges in a DAG");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 10: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("wordcount", WordCount.class,
        "A map/reduce program that counts the words in the input files.");
    pgd.addClass("wordmean", WordMean.class,
        "A map/reduce program that counts the average length of the words in the input files.");
    pgd.addClass("wordmedian", WordMedian.class,
        "A map/reduce program that counts the median length of the words in the input files.");
    pgd.addClass("wordstandarddeviation", WordStandardDeviation.class,
        "A map/reduce program that counts the standard deviation of the length of the words in the input files.");
    pgd.addClass("aggregatewordcount", AggregateWordCount.class,
        "An Aggregate based map/reduce program that counts the words in the input files.");
    pgd.addClass("aggregatewordhist", AggregateWordHistogram.class,
        "An Aggregate based map/reduce program that computes the histogram of the words in the input files.");
    pgd.addClass("grep", Grep.class,
        "A map/reduce program that counts the matches of a regex in the input.");
    pgd.addClass("randomwriter", RandomWriter.class,
        "A map/reduce program that writes 10GB of random data per node.");
    pgd.addClass("randomtextwriter", RandomTextWriter.class,
        "A map/reduce program that writes 10GB of random textual data per node.");
    pgd.addClass("sort", Sort.class,
        "A map/reduce program that sorts the data written by the random writer.");
    pgd.addClass("pi", QuasiMonteCarlo.class, QuasiMonteCarlo.DESCRIPTION);
    pgd.addClass("bbp", BaileyBorweinPlouffe.class, BaileyBorweinPlouffe.DESCRIPTION);
    pgd.addClass("distbbp", DistBbp.class, DistBbp.DESCRIPTION);
    pgd.addClass("pentomino", DistributedPentomino.class,
        "A map/reduce tile laying program to find solutions to pentomino problems.");
    pgd.addClass("secondarysort", SecondarySort.class,
        "An example defining a secondary sort to the reduce.");
    pgd.addClass("sudoku", Sudoku.class, "A sudoku solver.");
    pgd.addClass("join", Join.class,
        "A job that effects a join over sorted, equally partitioned datasets");
    pgd.addClass("multifilewc", MultiFileWordCount.class,
        "A job that counts words from several files.");
    pgd.addClass("dbcount", DBCountPageView.class,
        "An example job that counts the pageview counts from a database.");
    pgd.addClass("teragen", TeraGen.class, "Generate data for the terasort");
    pgd.addClass("terasort", TeraSort.class, "Run the terasort");
    pgd.addClass("teravalidate", TeraValidate.class, "Checking results of terasort");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 11: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("wordcount", WordCountGPU.class,
        "A map/reduce program that counts the words in the input files.");
    pgd.addClass("wordmean", WordMean.class,
        "A map/reduce program that counts the average length of the words in the input files.");
    pgd.addClass("wordmedian", WordMedian.class,
        "A map/reduce program that counts the median length of the words in the input files.");
    pgd.addClass("wordstandarddeviation", WordStandardDeviation.class,
        "A map/reduce program that counts the standard deviation of the length of the words in the input files.");
    pgd.addClass("aggregatewordcount", AggregateWordCount.class,
        "An Aggregate based map/reduce program that counts the words in the input files.");
    pgd.addClass("aggregatewordhist", AggregateWordHistogram.class,
        "An Aggregate based map/reduce program that computes the histogram of the words in the input files.");
    pgd.addClass("grep", Grep.class,
        "A map/reduce program that counts the matches of a regex in the input.");
    pgd.addClass("randomwriter", RandomWriter.class,
        "A map/reduce program that writes 10GB of random data per node.");
    pgd.addClass("randomtextwriter", RandomTextWriter.class,
        "A map/reduce program that writes 10GB of random textual data per node.");
    pgd.addClass("sort", Sort.class,
        "A map/reduce program that sorts the data written by the random writer.");
    pgd.addClass("pi", QuasiMonteCarlo.class, QuasiMonteCarlo.DESCRIPTION);
    pgd.addClass("bbp", BaileyBorweinPlouffe.class, BaileyBorweinPlouffe.DESCRIPTION);
    pgd.addClass("distbbp", DistBbp.class, DistBbp.DESCRIPTION);
    pgd.addClass("pentomino", DistributedPentomino.class,
        "A map/reduce tile laying program to find solutions to pentomino problems.");
    pgd.addClass("secondarysort", SecondarySort.class,
        "An example defining a secondary sort to the reduce.");
    pgd.addClass("sudoku", Sudoku.class, "A sudoku solver.");
    pgd.addClass("join", Join.class,
        "A job that effects a join over sorted, equally partitioned datasets");
    pgd.addClass("multifilewc", MultiFileWordCount.class,
        "A job that counts words from several files.");
    pgd.addClass("dbcount", DBCountPageView.class,
        "An example job that counts the pageview counts from a database.");
    pgd.addClass("teragen", TeraGen.class, "Generate data for the terasort");
    pgd.addClass("terasort", TeraSort.class, "Run the terasort");
    pgd.addClass("teravalidate", TeraValidate.class, "Checking results of terasort");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 12: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("wordcount", WordCount.class,
        "A map/reduce program that counts the words in the input files. Multiple input paths are supported.");
    pgd.addClass("wordmean", WordMean.class,
        "A map/reduce program that counts the average length of the words in the input files.");
    pgd.addClass("wordmedian", WordMedian.class,
        "A map/reduce program that counts the median length of the words in the input files.");
    pgd.addClass("wordstandarddeviation", WordStandardDeviation.class,
        "A map/reduce program that counts the standard deviation of the length of the words in the input files.");
    pgd.addClass("aggregatewordcount", AggregateWordCount.class,
        "An Aggregate based map/reduce program that counts the words in the input files.");
    pgd.addClass("aggregatewordhist", AggregateWordHistogram.class,
        "An Aggregate based map/reduce program that computes the histogram of the words in the input files.");
    pgd.addClass("grep", Grep.class,
        "A map/reduce program that counts the matches of a regex in the input.");
    pgd.addClass("randomwriter", RandomWriter.class,
        "A map/reduce program that writes 10GB of random data per node.");
    pgd.addClass("randomtextwriter", RandomTextWriter.class,
        "A map/reduce program that writes 10GB of random textual data per node.");
    pgd.addClass("sort", Sort.class,
        "A map/reduce program that sorts the data written by the random writer.");
    pgd.addClass("pi", QuasiMonteCarloModified.class,
        "Modified pi that accepts a job name, as well as standard <int> <int> args.");
    pgd.addClass("bbp", BaileyBorweinPlouffe.class, BaileyBorweinPlouffe.DESCRIPTION);
    pgd.addClass("distbbp", DistBbp.class, DistBbp.DESCRIPTION);
    pgd.addClass("pentomino", DistributedPentomino.class,
        "A map/reduce tile laying program to find solutions to pentomino problems.");
    pgd.addClass("secondarysort", SecondarySort.class,
        "An example defining a secondary sort to the reduce.");
    pgd.addClass("sudoku", Sudoku.class, "A sudoku solver.");
    pgd.addClass("join", Join.class,
        "A job that effects a join over sorted, equally partitioned datasets");
    pgd.addClass("multifilewc", MultiFileWordCount.class,
        "A job that counts words from several files.");
    pgd.addClass("dbcount", DBCountPageView.class,
        "An example job that counts the pageview counts from a database.");
    pgd.addClass("teragen", TeraGen.class, "Generate data for the terasort");
    pgd.addClass("terasort", TeraSort.class, "Run the terasort");
    pgd.addClass("teravalidate", TeraValidate.class, "Checking results of terasort");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 13: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("wordcount", WordCount.class,
        "A map/reduce program that counts the words in the input files.");
    pgd.addClass("wordmean", WordMean.class,
        "A map/reduce program that counts the average length of the words in the input files.");
    pgd.addClass("wordmedian", WordMedian.class,
        "A map/reduce program that counts the median length of the words in the input files.");
    pgd.addClass("wordstandarddeviation", WordStandardDeviation.class,
        "A map/reduce program that counts the standard deviation of the length of the words in the input files.");
    pgd.addClass("aggregatewordcount", AggregateWordCount.class,
        "An Aggregate based map/reduce program that counts the words in the input files.");
    pgd.addClass("aggregatewordhist", AggregateWordHistogram.class,
        "An Aggregate based map/reduce program that computes the histogram of the words in the input files.");
    pgd.addClass("grep", Grep.class,
        "A map/reduce program that counts the matches of a regex in the input.");
    pgd.addClass("randomwriter", RandomWriter.class,
        "A map/reduce program that writes 10GB of random data per node.");
    pgd.addClass("randomtextwriter", RandomTextWriter.class,
        "A map/reduce program that writes 10GB of random textual data per node.");
    pgd.addClass("sort", Sort.class,
        "A map/reduce program that sorts the data written by the random writer.");
    pgd.addClass("pi", QuasiMonteCarlo.class, QuasiMonteCarlo.DESCRIPTION);
    pgd.addClass("bbp", BaileyBorweinPlouffe.class, BaileyBorweinPlouffe.DESCRIPTION);
    pgd.addClass("distbbp", DistBbp.class, DistBbp.DESCRIPTION);
    pgd.addClass("pentomino", DistributedPentomino.class,
        "A map/reduce tile laying program to find solutions to pentomino problems.");
    pgd.addClass("secondarysort", SecondarySort.class,
        "An example defining a secondary sort to the reduce.");
    pgd.addClass("sudoku", Sudoku.class, "A sudoku solver.");
    pgd.addClass("join", Join.class,
        "A job that effects a join over sorted, equally partitioned datasets");
    pgd.addClass("multifilewc", MultiFileWordCount.class,
        "A job that counts words from several files.");
    pgd.addClass("dbcount", DBCountPageView.class,
        "An example job that counts the pageview counts from a database.");
    pgd.addClass("teragen", TeraGen.class, "Generate data for the terasort");
    pgd.addClass("terasort", TeraSortLustre.class, "Run the terasort with Lustre");
    pgd.addClass("terasort_hdfs", TeraSort.class, "Run the terasort with HDFS");
    pgd.addClass("teravalidate", TeraValidate.class, "Checking results of terasort");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 14: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("wordcount", WordCount.class,
        "A map/reduce program that counts the words in the input files.");
    pgd.addClass("mapredwordcount", MapredWordCount.class,
        "A map/reduce program that counts the words in the input files"
            + " using the mapred apis.");
    pgd.addClass("randomwriter", RandomWriter.class,
        "A map/reduce program that writes 10GB of random data per node.");
    pgd.addClass("randomtextwriter", RandomTextWriter.class,
        "A map/reduce program that writes 10GB of random textual data per node.");
    pgd.addClass("sort", Sort.class,
        "A map/reduce program that sorts the data written by the random"
            + " writer.");
    pgd.addClass("secondarysort", SecondarySort.class,
        "An example defining a secondary sort to the reduce.");
    pgd.addClass("join", Join.class,
        "A job that effects a join over sorted, equally partitioned"
            + " datasets");
    pgd.addClass("teragen", TeraGen.class,
        "Generate data for the terasort");
    pgd.addClass("terasort", TeraSort.class,
        "Run the terasort");
    pgd.addClass("teravalidate", TeraValidate.class,
        "Checking results of terasort");
    pgd.addClass("groupbyorderbymrrtest", GroupByOrderByMRRTest.class,
        "A map-reduce-reduce program that does groupby-order by. Takes input"
            + " containing employee_name department name per line of input"
            + " and generates count of employees per department and"
            + " sorted on employee count");
    pgd.addClass("mrrsleep", MRRSleepJob.class,
        "MRR Sleep Job");
    pgd.addClass("orderedwordcount", OrderedWordCount.class,
        "Word Count with words sorted on frequency");
    pgd.addClass("unionexample", UnionExample.class,
        "Union example");
    pgd.addClass("broadcastAndOneToOneExample", BroadcastAndOneToOneExample.class,
        "BroadcastAndOneToOneExample example");
    pgd.addClass("filterLinesByWord", FilterLinesByWord.class,
        "Filters lines by the specified word using broadcast edge");
    pgd.addClass("filterLinesByWordOneToOne", FilterLinesByWordOneToOne.class,
        "Filters lines by the specified word using OneToOne edge");
    pgd.addClass("intersect", IntersectExample.class,
        "Identify all occurrences of lines in file1 which also occur in file2");
    pgd.addClass("intersectdatagen", IntersectDataGen.class,
        "Generate data to run the intersect example");
    pgd.addClass("intersectvalidate", IntersectValidate.class,
        "Validate data generated by intersect and intersectdatagen");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
Example 15: main
import org.apache.hadoop.util.ProgramDriver; // import the package/class this method depends on
public static void main(String argv[]) {
  int exitCode = -1;
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("broadcastloadgen", BroadcastLoadGen.class,
        "Run a DAG to generate load for Broadcast Shuffle");
    pgd.addClass("rpcloadgen", RPCLoadGen.class,
        "Run a DAG to generate load for the task to AM RPC");
    pgd.addClass("wordcount", MapredWordCount.class,
        "A map/reduce program that counts the words in the input files.");
    pgd.addClass("mapredwordcount", MapredWordCount.class,
        "A map/reduce program that counts the words in the input files"
            + " using the mapred apis.");
    pgd.addClass("randomwriter", RandomWriter.class,
        "A map/reduce program that writes 10GB of random data per node.");
    pgd.addClass("randomtextwriter", RandomTextWriter.class,
        "A map/reduce program that writes 10GB of random textual data per node.");
    pgd.addClass("sort", Sort.class,
        "A map/reduce program that sorts the data written by the random"
            + " writer.");
    pgd.addClass("secondarysort", SecondarySort.class,
        "An example defining a secondary sort to the reduce.");
    pgd.addClass("join", Join.class,
        "A job that effects a join over sorted, equally partitioned"
            + " datasets");
    pgd.addClass("mrrsleep", MRRSleepJob.class,
        "MRR Sleep Job");
    pgd.addClass("testorderedwordcount", TestOrderedWordCount.class,
        "Word Count with words sorted on frequency");
    pgd.addClass("unionexample", UnionExample.class,
        "Union example");
    pgd.addClass("broadcastAndOneToOneExample", BroadcastAndOneToOneExample.class,
        "BroadcastAndOneToOneExample example");
    pgd.addClass("filterLinesByWord", FilterLinesByWord.class,
        "Filters lines by the specified word using broadcast edge");
    pgd.addClass("filterLinesByWordOneToOne", FilterLinesByWordOneToOne.class,
        "Filters lines by the specified word using OneToOne edge");
    pgd.addClass("multiplecommitsExample", MultipleCommitsExample.class,
        "Job with multiple commits in both vertex group and vertex");
    pgd.addClass("cartesianproduct", CartesianProduct.class,
        "Cartesian Product Example");
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
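To see the exit-code convention shared by these examples in action, the hypothetical snippet below (not taken from any of the projects above) dispatches an unknown command name: run() prints the registered program descriptions and returns -1 rather than throwing.

import org.apache.hadoop.util.ProgramDriver;

public class DriverSmokeTest {
  public static void main(String[] args) throws Throwable {
    ProgramDriver pgd = new ProgramDriver();
    pgd.addClass("noop", DriverSmokeTest.class, "Does nothing.");
    // An unrecognized first argument does not raise an exception; run()
    // prints the usage listing and signals failure through its return value.
    int rc = pgd.run(new String[] { "no-such-program" });
    System.out.println("exit code for unknown command: " + rc); // prints -1
  }
}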