Java ProgramDriver Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.util.ProgramDriver. If you are wondering what exactly the ProgramDriver class does, how to use it, or where to find examples of it in use, the curated class examples here may help.


The ProgramDriver class belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
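Before the examples, here is a minimal, self-contained sketch of the pattern they all follow: each tool is registered under a command name with addClass, and driver then dispatches the command line to the matching tool's main. HelloTool and the command name "hello" are hypothetical placeholders, not part of any example below.

import org.apache.hadoop.util.ProgramDriver;

public class ToolLauncher {

  // Hypothetical stand-in for a real MapReduce job class.
  public static class HelloTool {
    public static void main(String[] args) {
      System.out.println("hello, got " + args.length + " argument(s)");
    }
  }

  public static void main(String[] args) throws Throwable {
    ProgramDriver pgd = new ProgramDriver();
    // Register HelloTool under the name "hello"; the description is shown
    // in the usage listing printed when no valid program name is given.
    pgd.addClass("hello", HelloTool.class, "Prints a greeting.");
    // Looks up args[0] among the registered names and invokes that
    // class's main() with the remaining arguments.
    pgd.driver(args);
  }
}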

Example 1: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();

  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");

  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
 
Developer: fengchen8086 | Project: ditb | Lines: 30 | Source: Driver.java
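A note on the reflective call that ends this main (Examples 2 and 10 do the same): the declared return type of ProgramDriver.driver has differed across Hadoop releases (void in some, int in others), so a direct call compiled against one release can fail at runtime with NoSuchMethodError on another. Looking the method up by name and invoking it reflectively sidesteps the mismatch. Functionally, the call is equivalent to the direct form:

  pgd.driver(args);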

Example 2: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 25 | Source: Driver.java

Example 3: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
/**
 * A description of the test programs for running all the tests using the jar file
 */
public static void main(String argv[]){
  ProgramDriver pgd = new ProgramDriver();
  try {
    pgd.addClass("gentest", DFSGeneralTest.class, "A map/reduce benchmark that supports running multi-thread operations in multiple machines");
    pgd.addClass("locktest", DFSLockTest.class, "A benchmark that spawns many threads and each thread run many configurable read/write FileSystem operations to test FSNamesystem lock's concurrency.");
    pgd.addClass("dirtest", DFSDirTest.class, "A map/reduce benchmark that creates many jobs and each job spawns many threads and each thread create/delete many dirs.");
    pgd.addClass("dfstest", DFSIOTest.class, "A map/reduce benchmark that creates many jobs and each jobs can create many files to test i/o rate per task of hadoop cluster.");
    pgd.addClass("structure-gen", StructureGenerator.class, "Create a structure of files and directories as an input for data-gen");
    pgd.addClass("data-gen", DataGenerator.class, "Create files and directories on cluster as inputs for load-gen");
    pgd.addClass("load-gen", LoadGenerator.class, "A tool to test the behavior of NameNode with different client loads.");
    pgd.addClass("testnn", TestNNThroughputBenchmark.class, "Test the behavior of the namenode on localhost." +
        " Here namenode is real and others are simulated");
    pgd.driver(argv);
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 21 | Source: AllTestDriver.java
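These benchmark entries are normally reached through the project's test jar, e.g. "hadoop jar <test-jar> load-gen <options>", where the jar name is a placeholder for the build's artifact; running the driver with no arguments prints the registered program names and the descriptions above.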

Example 4: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("wordcount", WordCount.class,
				"A map/reduce program that counts the words in the input files.");

		pgd.addClass("xflowstatic", XflowStatic.class,
				"A map/reduce program that static xflow from data files.");

		exitCode = pgd.run(argv);
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
Developer: micmiu | Project: bigdata-tutorial | Lines: 18 | Source: ExampleDriver.java
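Unlike driver(String[]), run(String[]) returns the selected program's exit status rather than ever exiting the JVM itself, which is what makes the exitCode bookkeeping above meaningful. A typical invocation would be "hadoop jar <examples-jar> wordcount <input-dir> <output-dir>", with the jar name a placeholder for this project's build artifact.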

Example 5: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public static void main(String args[]) throws UnsupportedEncodingException {

		int exitCode = -1;
		ProgramDriver pgd = new ProgramDriver();
		try {
			JobConf job = new JobConf();
			new GenericOptionsParser(job, args);
			String metric = job.get(Config.METRIC_PROPERTY,Config.METRIC_VALUE).toLowerCase();
			if(metric.contains("j")){
				JaccardCoarsePartitionMain.main(args);
			}else 
				HolderCosinePartitionMain.main(args);
			//			// pgd.addClass("cpartitionw", CosineWeightPartitionMain.class,
			//			// "\tCosine static partitioning on weight sorted documents");
			//			pgd.addClass("cpartitiona", CosineAllPartitionMain.class,
			//					"\tCosine static partitioning on ALL sorted documents");
			//pgd.driver(args);
			exitCode = 0;
		} catch (Throwable e) {
			e.printStackTrace();
		}
		System.exit(exitCode);
	}
 
Developer: mahaucsb | Project: pss | Lines: 24 | Source: PartDriver.java

Example 6: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
/**
 * Input is files of documents, one document per line. The files are text and can either be compressed as .warc.gz or uncompressed.
 * MARCH NOT FINISHED
 * Prints these options to choose from:<br>
 * - [html] for html pages to be cleaned. <br>
 * - [warc] for .warc.gz files to be cleaned.<br>
 *
 * @param argv : command line inputs
 */
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("warc", WarcFileCleaner.class,
				"A MapReduce job to clean .warc.gz webpages from html and weird characters into set of features.");
		pgd.addClass(
				"html",
				PageCleaner.class,
				"A MapReduce job to clean html pages from stopwords, weird characters even alphanumerics. It further convert letters into lowercase. ");
		pgd.driver(argv);
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
Developer: mahaucsb | Project: pss | Lines: 27 | Source: CleanPagesDriver.java
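One caveat about this main, which Examples 7-9 share: exitCode is initialized to -1 and never reassigned, so unless the selected tool calls System.exit itself, the process exits with -1 even on success. If a success status is wanted, it has to be set after driver returns:

	pgd.driver(argv);
	exitCode = 0; // reached only if the selected program returned normally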

Example 7: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
/**
 * Prints these options to choose from:<br>
 * - [clean] documents to produce document ID: bag of cleaned words. <br>
 * - [hash] bag of words into bag of hashed tokens.<br>
 * - Produce [sequence] records [LongWritable,FeatureWeightArrayWritable] <br>
 * - [seq] deals with writing/reading/combining sequence files.
 * 
 * @param argv : command line inputs
 */
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("clean", CleanPagesDriver.class,
				"A MapReduce job to clean input pages. See options.");
		pgd.addClass(
				"hash", HashPagesDriver.class,
				"A MapReduce job to collect features, then hash input data into [docId <features>] with associated weights if desired.");
		pgd.addClass("seq", SequenceDriver.class,
				"For writing/reading/merging sequence files. See options.\n\n");
		pgd.driver(argv);
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
Developer: mahaucsb | Project: pss | Lines: 28 | Source: PreprocessDriver.java

Example 8: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
/**
 * Prints these options to choose from:<br>
 * - [write] convert hashed pages into sequence files.<br>
 * - [read] read sequence files and print them to the console.<br>
 * - [combine] merge sequence records from multiple files into one.<br>
 * 
 * @param argv : command line inputs
 */
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("write", SeqWriter.class,
				"A MapReduce job to convert hashed pages into sequence files.");
		pgd.addClass("read", SeqReader.class,
				"Print out sequence pages in readable format.");
		pgd.addClass("combine", SeqFilesCombiner.class,
				"A regular java program to combine sequence records from multiple files into one file in hdfs.");
		pgd.driver(argv);
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
Developer: mahaucsb | Project: pss | Lines: 25 | Source: SequenceDriver.java

Example 9: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public static void main(String argv[]) {
	int exitCode = -1;
	ProgramDriver pgd = new ProgramDriver();
	try {
		pgd.addClass("randomlsh", ProjectionLshDriver.class,
				"Partition input vectors according to random projections.");
		pgd.addClass("minhashlsh", MinHashLshDriver.class,
				"Partition input vectors according to minhash values.");
		pgd.addClass("lshstat", LshStat.class, "Collect statistics from binray lshpartitions/");
		pgd.driver(argv);
	} catch (Throwable e) {
		e.printStackTrace();
	}

	System.exit(exitCode);
}
 
Developer: mahaucsb | Project: pss | Lines: 17 | Source: LshDriver.java

Example 10: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();

  pgd.addClass(RowCounter.NAME, RowCounter.class,
    "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
    "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
               "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " data from tables in two different clusters. It" +
      " doesn't work for incrementColumnValues'd cells since" +
      " timestamp is changed after appending to WAL.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");

  ProgramDriver.class.getMethod("driver", new Class [] {String[].class}).
    invoke(pgd, new Object[]{args});
}
 
Developer: apache | Project: hbase | Lines: 30 | Source: Driver.java

Example 11: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public static void main(String[] args) {
  ProgramDriver programDriver = new ProgramDriver();
  int exitCode = -1;
  try {
    programDriver.addClass("wordcount-hbase", WordCountHBase.class,
        "A map/reduce program that counts the words in the input files.");
    programDriver.addClass("export-table", Export.class,
        "A map/reduce program that exports a table to a file.");
    //programDriver.addClass("cellcounter", CellCounter.class, "Count them cells!");
    // run(args) dispatches to the selected program and returns its exit
    // status; calling driver(args) as well would run the program twice.
    exitCode = programDriver.run(args);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}
 
Developer: GoogleCloudPlatform | Project: cloud-bigtable-examples | Lines: 17 | Source: WordCountDriver.java
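For context: in Hadoop 2.x, ProgramDriver exposes both run(String[]), which returns the selected program's exit status, and the older driver(String[]), a thin wrapper around it that may call System.exit on failure. Both dispatch to the registered program, so a main should call exactly one of them, as above.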

Example 12: main

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public static void main(String[] args) throws Exception {

        ProgramDriver programDriver = new ProgramDriver();
        int exitCode = -1;
        try {
            programDriver.addClass("readdepth-intervals", ComputeReadDepthInInterval.class, "Computes read depth over a given size interval");
            programDriver.addClass("readdepth-hist", DepthHistogram.class, "Computes distribution of read depths");
            programDriver.addClass("germline", GermlinePipeline.class, "Standard germline variant caller");
            programDriver.addClass("somatic", SomaticPipeline.class, "Standard somatic variant caller, takes tumor/normal input");

            programDriver.driver(args);

            exitCode = 0;
        } catch (Throwable e) {
            e.printStackTrace();
        }

        System.exit(exitCode);
    }
 
Developer: arahuja | Project: varcrunch | Lines: 20 | Source: VarCrunchDriver.java

Example 13: CoreTestDriver

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public CoreTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsetfile", TestSetFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testarrayfile", TestArrayFile.class, 
        "A test for flat files of binary key/value pairs.");
    pgd.addClass("testrpc", TestRPC.class, "A test for rpc.");
    pgd.addClass("testipc", TestIPC.class, "A test for ipc.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 14 | Source: CoreTestDriver.java
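Examples 13-15 take a different shape: instead of a main, each is a constructor that registers its programs into a ProgramDriver supplied by the caller, so several test drivers can share one registry. The launcher below is a sketch of how such a class might be wired up, under the assumption that CoreTestDriver (Example 13) is on the classpath; it is not code from the listing.

import org.apache.hadoop.util.ProgramDriver;

public class TestDriverLauncher {
  public static void main(String[] argv) throws Throwable {
    ProgramDriver pgd = new ProgramDriver();
    // The constructor registers testsetfile, testarrayfile, testrpc
    // and testipc into the shared ProgramDriver (see Example 13).
    new CoreTestDriver(pgd);
    // Dispatch to whichever registered test argv[0] selects.
    pgd.driver(argv);
  }
}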

Example 14: YarnTestDriver

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public YarnTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass(TestZKRMStateStorePerf.class.getSimpleName(),
        TestZKRMStateStorePerf.class,
        "ZKRMStateStore i/o benchmark.");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
 
Developer: naver | Project: hadoop | Lines: 11 | Source: YarnTestDriver.java

Example 15: HdfsTestDriver

import org.apache.hadoop.util.ProgramDriver; // import the required package/class
public HdfsTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("dfsthroughput", BenchmarkThroughput.class, 
        "measure hdfs throughput");
    pgd.addClass("minidfscluster", MiniDFSClusterManager.class, 
        "Run a single-process mini DFS cluster");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
 
Developer: naver | Project: hadoop | Lines: 12 | Source: HdfsTestDriver.java


Note: the org.apache.hadoop.util.ProgramDriver class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors; consult each project's license before redistributing or reusing the code. Do not republish without permission.