當前位置: 首頁>>代碼示例>>Java>>正文


Java ProgramDriver.driver方法代碼示例

本文整理匯總了Java中org.apache.hadoop.util.ProgramDriver.driver方法的典型用法代碼示例。如果您正苦於以下問題:Java ProgramDriver.driver方法的具體用法?Java ProgramDriver.driver怎麽用?Java ProgramDriver.driver使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.util.ProgramDriver的用法示例。


在下文中一共展示了ProgramDriver.driver方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Entry point for running every benchmark/test bundled in this jar.
 * Registers each test class with a {@link ProgramDriver} under a short
 * command name and dispatches to the one selected by {@code argv[0]}.
 * Any failure during registration or dispatch is printed to stderr.
 */
public static void main(String argv[]){
  ProgramDriver registry = new ProgramDriver();
  try {
    registry.addClass("gentest", DFSGeneralTest.class, "A map/reduce benchmark that supports running multi-thread operations in multiple machines");
    registry.addClass("locktest", DFSLockTest.class, "A benchmark that spawns many threads and each thread run many configurable read/write FileSystem operations to test FSNamesystem lock's concurrency.");
    registry.addClass("dirtest", DFSDirTest.class, "A map/reduce benchmark that creates many jobs and each job spawns many threads and each thread create/delete many dirs.");
    registry.addClass("dfstest", DFSIOTest.class, "A map/reduce benchmark that creates many jobs and each jobs can create many files to test i/o rate per task of hadoop cluster.");
    registry.addClass("structure-gen", StructureGenerator.class, "Create a structure of files and directories as an input for data-gen");
    registry.addClass("data-gen", DataGenerator.class, "Create files and directories on cluster as inputs for load-gen");
    registry.addClass("load-gen", LoadGenerator.class, "A tool to test the behavior of NameNode with different client loads.");
    registry.addClass("testnn", TestNNThroughputBenchmark.class, "Test the behavior of the namenode on localhost. Here namenode is real and others are simulated");
    registry.driver(argv);
  } catch (Throwable t) {
    t.printStackTrace();
  }
}
 
開發者ID:rhli,項目名稱:hadoop-EAR,代碼行數:21,代碼來源:AllTestDriver.java

示例2: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Command-line dispatcher for the page-cleaning MapReduce jobs.
 * Input are files of documents per line. The files are text and can either
 * be compressed via .warc.gz or not compressed.
 * Prints these options to choose from:<br>
 * - [html] for html pages to be cleaned. <br>
 * - [warc] for .warc.gz files to be cleaned.<br>
 *
 * NOTE(review): the original comment said "MARCH NOT FINISHED" — confirm
 * whether this driver is considered complete before relying on it.
 *
 * @param argv : command line inputs; argv[0] selects the job to run
 */
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("warc", WarcFileCleaner.class,
				"A MapReduce job to clean .warc.gz webpages from html and weird characters into set of features.");
		pgd.addClass(
				"html",
				PageCleaner.class,
				"A MapReduce job to clean html pages from stopwords, weird characters even alphanumerics. It further convert letters into lowercase. ");
		pgd.driver(argv);
		// BUG FIX: exitCode was never updated, so the JVM always exited
		// with -1 even when the selected job completed successfully.
		exitCode = 0;
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
開發者ID:mahaucsb,項目名稱:pss,代碼行數:27,代碼來源:CleanPagesDriver.java

示例3: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Command-line dispatcher for the preprocessing jobs.
 * Prints these options to choose from:<br>
 * - [clean] documents to produce document ID: bag of cleaned words. <br>
 * - [hash] bag of words into bag of hashed tokens.<br>
 * - [seq] deals with writing/reading/combining sequence files.
 *
 * @param argv : command line inputs; argv[0] selects the job to run
 */
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("clean", CleanPagesDriver.class,
				"A MapReduce job to clean input pages. See options.");
		pgd.addClass(
				"hash", HashPagesDriver.class,
				"A MapReduce job to collect features then hash input data into [docId <features>] with associated weights if desired. ");
		// FIX: corrected user-facing typo "optoins" -> "options" in the help text.
		pgd.addClass("seq", SequenceDriver.class,
				"For writing/reading/merging sequence files. See options.\n\n");
		pgd.driver(argv);
		// BUG FIX: exitCode was never updated, so the JVM always exited
		// with -1 even when the selected job completed successfully.
		exitCode = 0;
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
開發者ID:mahaucsb,項目名稱:pss,代碼行數:28,代碼來源:PreprocessDriver.java

示例4: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Command-line dispatcher for the sequence-file utilities.
 * Prints these options to choose from:<br>
 * - [write] convert hashed pages into sequence files. <br>
 * - [read] print sequence pages in readable format.<br>
 * - [combine] merge sequence records from multiple files into one hdfs file.<br>
 * (Doc fix: the original javadoc listed [read]/[convert], which did not
 * match the registered commands below.)
 *
 * @param argv : command line inputs; argv[0] selects the job to run
 */
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("write", SeqWriter.class,
				"A MapReduce job to convert hashed pages into sequence files.");
		pgd.addClass("read", SeqReader.class,
				"Print out sequence pages in readable format.");
		pgd.addClass("combine", SeqFilesCombiner.class,
				"A regular java program to combine sequence records from multiple files into one file in hdfs.");
		pgd.driver(argv);
		// BUG FIX: exitCode was never updated, so the JVM always exited
		// with -1 even when the selected job completed successfully.
		exitCode = 0;
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
開發者ID:mahaucsb,項目名稱:pss,代碼行數:25,代碼來源:SequenceDriver.java

示例5: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Command-line dispatcher for the LSH partitioning jobs: random-projection
 * LSH, minhash LSH, and a statistics collector over the produced partitions.
 *
 * @param argv command line inputs; argv[0] selects the job to run
 */
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("randomlsh", ProjectionLshDriver.class,
				"Partition input vectors according to random projections.");
		pgd.addClass("minhashlsh", MinHashLshDriver.class,
				"Partition input vectors according to minhash values.");
		// FIX: corrected user-facing typo "binray lshpartitions/" in the help text.
		pgd.addClass("lshstat", LshStat.class, "Collect statistics from binary lsh partitions.");
		pgd.driver(argv);
		// BUG FIX: exitCode was never updated, so the JVM always exited
		// with -1 even when the selected job completed successfully.
		exitCode = 0;
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
開發者ID:mahaucsb,項目名稱:pss,代碼行數:17,代碼來源:LshDriver.java

示例6: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Entry point that registers the Bigtable/HBase example jobs and runs the
 * one selected by {@code args[0]}. Exits 0 on success, -1 on failure.
 *
 * @param args command line parameters; args[0] selects the program
 */
public static void main(String[] args) {
  ProgramDriver programDriver = new ProgramDriver();
  int exitCode = -1;
  try {
    programDriver.addClass("wordcount-hbase", WordCountHBase.class,
        "A map/reduce program that counts the words in the input files.");
    programDriver.addClass("export-table", Export.class,
        "A map/reduce program that exports a table to a file.");
    //programDriver.addClass("cellcounter", CellCounter.class, "Count them cells!");
    // BUG FIX: the original called both driver(args) and run(args), which
    // executed the selected program twice (driver() delegates to run()).
    // A single run() invocation dispatches once and yields the exit code.
    exitCode = programDriver.run(args);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
 
開發者ID:GoogleCloudPlatform,項目名稱:cloud-bigtable-examples,代碼行數:17,代碼來源:WordCountDriver.java

示例7: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Registers the VarCrunch pipelines with a {@link ProgramDriver} and
 * dispatches to the one named on the command line. Exits with 0 when the
 * selected pipeline completes, -1 when registration or dispatch fails.
 *
 * @param args command line parameters; args[0] selects the pipeline
 * @throws Exception declared for compatibility; failures are caught below
 */
public static void main(String[] args) throws Exception {
    int exitCode = -1;
    ProgramDriver pipelines = new ProgramDriver();
    try {
        pipelines.addClass("readdepth-intervals", ComputeReadDepthInInterval.class, "Computes read depth over a given size interval");
        pipelines.addClass("readdepth-hist", DepthHistogram.class, "Computes distribution of read depths");
        pipelines.addClass("germline", GermlinePipeline.class, "Standard germline variant caller");
        pipelines.addClass("somatic", SomaticPipeline.class, "Standard somatic variant caller, takes tumor/normal input");

        pipelines.driver(args);

        exitCode = 0;
    } catch (Throwable t) {
        t.printStackTrace();
    }

    System.exit(exitCode);
}
 
開發者ID:arahuja,項目名稱:varcrunch,代碼行數:20,代碼來源:VarCrunchDriver.java

示例8: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Entry point for running every benchmark/test bundled in this jar.
 * Each test class is registered under a short command name; the class
 * matching {@code argv[0]} is then invoked with the remaining arguments.
 * Any failure during registration or dispatch is printed to stderr.
 */
public static void main(String argv[]){
  ProgramDriver tests = new ProgramDriver();
  try {
    tests.addClass("threadedmapbench", ThreadedMapBenchmark.class,
                   "A map/reduce benchmark that compares the performance of maps with multiple spills over maps with 1 spill");
    tests.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    tests.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    tests.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    tests.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    tests.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    tests.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    tests.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    tests.addClass("testrpc", TestRPC.class, "A test for rpc.");
    tests.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    tests.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    tests.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    tests.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    tests.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    tests.addClass("testmapredsort", SortValidator.class,
                   "A map/reduce program that validates the map-reduce framework's sort.");
    tests.addClass("testbigmapoutput", BigMapOutput.class,
                   "A map/reduce program that works on a very big non-splittable file and does identity map/reduce");
    tests.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    tests.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    tests.addClass("dfsthroughput", BenchmarkThroughput.class,
                   "measure hdfs throughput");
    tests.addClass("MRReliabilityTest", ReliabilityTest.class,
                   "A program that tests the reliability of the MR framework by injecting faults/failures");
    tests.addClass("minicluster", MiniHadoopClusterManager.class,
                   "Single process HDFS and MR cluster.");
    tests.driver(argv);
  } catch (Throwable t) {
    t.printStackTrace();
  }
}
 
開發者ID:Nextzero,項目名稱:hadoop-2.6.0-cdh5.4.3,代碼行數:42,代碼來源:AllTestDriver.java

示例9: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Entry point that registers the table export/import jobs and runs the one
 * selected by {@code args[0]}. Exits 0 on success, -1 on failure.
 *
 * @param args command line parameters; args[0] selects the program
 */
public static void main(String[] args) {
  ProgramDriver programDriver = new ProgramDriver();
  int exitCode = -1;
  try {
    programDriver.addClass("export-table", Export.class,
        "A map/reduce program that exports a table to a file.");
    // FIX: the description was a copy-paste of the export text; Import
    // reads previously exported data back into a table.
    programDriver.addClass("import-table", Import.class,
        "A map/reduce program that imports a table from a file.");
    // BUG FIX: the original called both driver(args) and run(args), which
    // executed the selected program twice (driver() delegates to run()).
    // A single run() invocation dispatches once and yields the exit code.
    exitCode = programDriver.run(args);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
 
開發者ID:dmmcerlean,項目名稱:cloud-bigtable-client,代碼行數:16,代碼來源:Driver.java

示例10: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Registers every fuzzy-join job under a short command name and runs the
 * one selected by {@code argv[0]}. Exits 0 when the job completes, -1 when
 * registration or dispatch fails.
 *
 * @param argv command line inputs; argv[0] selects the job
 */
public static void main(String argv[]) {
    int exitCode = -1;
    ProgramDriver jobs = new ProgramDriver();
    try {
        jobs.addClass("dummyjob", DummyJob.class, "");
        jobs.addClass("recordbuild", RecordBuild.class, "");
        jobs.addClass("fuzzyjoin", FuzzyJoin.class, "");
        jobs.addClass("tokensbasic", TokensBasic.class, "");
        jobs.addClass("tokensimproved", TokensImproved.class, "");
        // pgd.addClass("ridpairsbasic", RIDPairsBasic.class, "");
        jobs.addClass("ridpairsimproved", RIDPairsImproved.class, "");
        jobs.addClass("ridpairsppjoin", RIDPairsPPJoin.class, "");
        jobs.addClass("recordpairsbasic", RecordPairsBasic.class, "");
        jobs.addClass("recordpairsimproved", RecordPairsImproved.class, "");
        jobs.addClass("ridrecordpairsimproved", RIDRecordPairsImproved.class, "");
        jobs.addClass("ridrecordpairsppjoin", RIDRecordPairsPPJoin.class, "");
        jobs.addClass("recordgenerate", RecordGenerate.class, "");
        jobs.addClass("recordbalance", RecordBalance.class, "");
        jobs.driver(argv);
        exitCode = 0; // reached only when the selected job succeeded
    } catch (Throwable t) {
        t.printStackTrace();
    }
    System.exit(exitCode);
}
 
開發者ID:TonyApuzzo,項目名稱:fuzzyjoin,代碼行數:28,代碼來源:FuzzyJoinDriver.java

示例11: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Main entry point for the jar file: registers each example job with a
 * {@link ProgramDriver} and dispatches to the one selected on the command
 * line. Unlike the other drivers in this collection, failures propagate to
 * the caller instead of being caught here.
 *
 * @param args  The command line parameters.
 * @throws Throwable When the selection fails.
 */
public static void main(String[] args) throws Throwable {
	ProgramDriver examples = new ProgramDriver();
	examples.addClass(ImportFromFile.NAME, ImportFromFile.class, "Import from file");
	examples.addClass(ImportFromFile2.NAME, ImportFromFile2.class, "Import from file (with dependencies)");
	examples.addClass(AnalyzeData.NAME, AnalyzeData.class, "Analyze imported JSON");
	examples.addClass(AnalyzeSnapshotData.NAME, AnalyzeSnapshotData.class, "Analyze imported JSON from snapshot");
	examples.addClass(ParseJson.NAME, ParseJson.class, "Parse JSON into columns");
	examples.addClass(ParseJson2.NAME, ParseJson2.class, "Parse JSON into columns (map only)");
	examples.addClass(ParseJsonMulti.NAME, ParseJsonMulti.class, "Parse JSON into multiple tables");
	examples.driver(args);
}
 
開發者ID:lhfei,項目名稱:hbase-in-action,代碼行數:18,代碼來源:Driver.java

示例12: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Registers the document-sorting jobs and dispatches to the one selected
 * by {@code args[0]}. Failures are printed to stderr.
 *
 * FIX: dropped the unused {@code throws UnsupportedEncodingException}
 * clause — nothing in this method can throw it, and every failure is
 * already caught as Throwable below. Also removed a stray "// del" marker.
 *
 * @param args command line arguments; args[0] selects the job
 */
public static void main(String args[]) {
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("lengthsort", LengthSortMain.class, "\tSort documents based on length");
		pgd.addClass("normsort", NormSortMain.class, "\tSort documents based on p-norm");
		pgd.addClass("maxwsort", MaxwSortMain.class, "\tSort documents based on max weight");
		pgd.addClass("sigsort", SigSortMain.class, "\tSort documents based on their signatures");
		pgd.driver(args);
	} catch (Throwable e) {
		e.printStackTrace();
	}
}
 
開發者ID:mahaucsb,項目名稱:pss,代碼行數:14,代碼來源:SortDriver.java

示例13: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Entry point for running every benchmark/test bundled in this jar.
 * Each test class is registered under a short command name; the class
 * matching {@code argv[0]} is then invoked with the remaining arguments.
 * Any failure during registration or dispatch is printed to stderr.
 */
public static void main(String argv[]){
  ProgramDriver tests = new ProgramDriver();
  try {
    tests.addClass("threadedmapbench", ThreadedMapBenchmark.class,
                   "A map/reduce benchmark that compares the performance of maps with multiple spills over maps with 1 spill");
    tests.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
    tests.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
    tests.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    tests.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
    tests.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
    tests.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
    tests.addClass("testarrayfile", TestArrayFile.class, "A test for flat files of binary key/value pairs.");
    tests.addClass("testrpc", TestRPC.class, "A test for rpc.");
    tests.addClass("testipc", TestIPC.class, "A test for ipc.");
    tests.addClass("testsequencefileinputformat", TestSequenceFileInputFormat.class, "A test for sequence file input format.");
    tests.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
    tests.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
    tests.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
    tests.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
    tests.addClass("testmapredsort", SortValidator.class,
                   "A map/reduce program that validates the map-reduce framework's sort.");
    tests.addClass("testbigmapoutput", BigMapOutput.class,
                   "A map/reduce program that works on a very big non-splittable file and does identity map/reduce");
    tests.addClass("loadgen", GenericMRLoadGenerator.class, "Generic map/reduce load generator");
    tests.addClass("filebench", FileBench.class, "Benchmark SequenceFile(Input|Output)Format (block,record compressed and uncompressed), Text(Input|Output)Format (compressed and uncompressed)");
    tests.addClass("dfsthroughput", BenchmarkThroughput.class,
                   "measure hdfs throughput");
    tests.addClass("MRReliabilityTest", ReliabilityTest.class,
                   "A program that tests the reliability of the MR framework by injecting faults/failures");
    tests.driver(argv);
  } catch (Throwable t) {
    t.printStackTrace();
  }
}
 
開發者ID:Seagate,項目名稱:hadoop-on-lustre,代碼行數:41,代碼來源:AllTestDriver.java

示例14: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Registers the spatial-data tools under short command names and runs the
 * one selected by {@code args[0]}. Exits 0 when the selected tool completes,
 * -1 when registration or dispatch fails.
 *
 * @param args command line parameters; args[0] selects the tool
 */
public static void main(String[] args) {
  int exitCode = -1;
  ProgramDriver tools = new ProgramDriver();
  try {
    tools.addClass("rangequery", RangeQuery.class,
        "Finds all objects in the query range given by a Prism");
    tools.addClass("count", RecordCount.class,
        "Count number of records in the query range given by a Prism");
    tools.addClass("index", Repartition.class,
        "Builds an index on an input file");
    tools.addClass("mbr", FileMBR.class,
        "Finds the minimal bounding Prism of an input file");
    tools.addClass("readfile", ReadFile.class,
        "Retrieve some information about the global index of a file");
    tools.addClass("sample", Sampler.class,
        "Reads a random sample from the input file");
    tools.addClass("generate", RandomSpatialGenerator.class,
        "Generates a random file containing spatial data");
    tools.addClass("plot", Plot.class,
        "Plots a file to an image");
    tools.driver(args);
    exitCode = 0; // reached only when the selected tool succeeded
  } catch (Throwable t) {
    t.printStackTrace();
  }
  System.exit(exitCode);
}
 
開發者ID:t0nyren,項目名稱:spatedb,代碼行數:40,代碼來源:Main.java

示例15: main

import org.apache.hadoop.util.ProgramDriver; //導入方法依賴的package包/類
/**
 * Registers every example program under a short command name and runs the
 * one selected by {@code argv[0]}. Exits 0 when the selected example
 * completes, -1 when registration or dispatch fails.
 *
 * @param argv command line parameters; argv[0] selects the example
 */
public static void main(String argv[]){
  int exitCode = -1;
  ProgramDriver examples = new ProgramDriver();
  try {
    examples.addClass("wordcount", WordCount.class,
        "A map/reduce program that counts the words in the input files.");
    examples.addClass("aggregatewordcount", AggregateWordCount.class,
        "An Aggregate based map/reduce program that counts the words in the input files.");
    examples.addClass("aggregatewordhist", AggregateWordHistogram.class,
        "An Aggregate based map/reduce program that computes the histogram of the words in the input files.");
    examples.addClass("grep", Grep.class,
        "A map/reduce program that counts the matches of a regex in the input.");
    examples.addClass("randomwriter", RandomWriter.class,
        "A map/reduce program that writes 10GB of random data per node.");
    examples.addClass("randomtextwriter", RandomTextWriter.class,
        "A map/reduce program that writes 10GB of random textual data per node.");
    examples.addClass("sort", Sort.class, "A map/reduce program that sorts the data written by the random writer.");
    examples.addClass("pi", PiEstimator.class, "A map/reduce program that estimates Pi using monte-carlo method.");
    examples.addClass("pentomino", DistributedPentomino.class,
        "A map/reduce tile laying program to find solutions to pentomino problems.");
    examples.addClass("secondarysort", SecondarySort.class,
        "An example defining a secondary sort to the reduce.");
    examples.addClass("sudoku", Sudoku.class, "A sudoku solver.");
    examples.addClass("sleep", SleepJob.class, "A job that sleeps at each map and reduce task.");
    examples.addClass("join", Join.class, "A job that effects a join over sorted, equally partitioned datasets");
    examples.addClass("multifilewc", MultiFileWordCount.class, "A job that counts words from several files.");
    examples.addClass("dbcount", DBCountPageView.class, "An example job that count the pageview counts from a database.");
    examples.addClass("teragen", TeraGen.class, "Generate data for the terasort");
    examples.addClass("terasort", TeraSort.class, "Run the terasort");
    examples.addClass("teravalidate", TeraValidate.class, "Checking results of terasort");
    examples.addClass("kmeans", Kmeans.class, "Kmeans on movies data");
    examples.addClass("classification", Classification.class, "Classify movies into clusters");
    examples.addClass("histogram_movies", HistogramMovies.class,
        "A map/reduce program that gives a histogram of movies based on ratings.");
    examples.addClass("histogram_ratings", HistogramRatings.class,
        "A map/reduce program that gives a histogram of users ratings on movies.");
    examples.addClass("selfjoin", SelfJoin.class,
        "A map/reduce program that creates k+1 associations given set of k-field associations");
    examples.addClass("invertedindex", InvertedIndex.class,
        "A map/reduce program that creates an inverted index of documents.");
    examples.addClass("adjlist", AdjList.class,
        "A map/reduce program that finds adjacency list of graph nodes.");
    examples.addClass("termvectorperhost", TermVectorPerHost.class,
        "A map/reduce program that creates the term-vectors (frequency of words) per document.");
    examples.addClass("sequencecount", SequenceCount.class,
        "A map/reduce program that counts the occurrence of consecutive words in the input files.");
    examples.addClass("rankedinvertedindex", RankedInvertedIndex.class,
        "A map/reduce program that creates the top k document lists per word");
    examples.driver(argv);
    exitCode = 0; // reached only when the selected example succeeded
  } catch (Throwable t) {
    t.printStackTrace();
  }
  System.exit(exitCode);
}
 
開發者ID:yncxcw,項目名稱:big-c,代碼行數:63,代碼來源:ExampleDriver.java


注:本文中的org.apache.hadoop.util.ProgramDriver.driver方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。