

Java Job.addArchiveToClassPath Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.addArchiveToClassPath. If you are wondering what Job.addArchiveToClassPath does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the class it belongs to, org.apache.hadoop.mapreduce.Job.


The following shows 4 code examples of Job.addArchiveToClassPath, sorted by popularity by default.
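Before the full examples, here is a minimal sketch of the basic call pattern. It assumes the archive has already been uploaded to a hypothetical HDFS location /libs/dependencies.jar; the job name and class name are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;

public class AddArchiveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "archive-classpath-demo");

    // The Path is resolved against the job's FileSystem (typically HDFS),
    // not the local disk; /libs/dependencies.jar is a hypothetical location.
    job.addArchiveToClassPath(new Path("/libs/dependencies.jar"));

    // ... configure mapper, reducer, input/output formats and paths as usual ...
  }
}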

Example 1: testWithConf

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
private void testWithConf(Configuration conf) throws IOException,
    InterruptedException, ClassNotFoundException, URISyntaxException {
  // Create a temporary file of length 1.
  Path first = createTempFile("distributed.first", "x");
  // Create three jars, each with a single file inside.
  Path second =
      makeJar(new Path(TEST_ROOT_DIR, "distributed.second.jar"), 2);
  Path third =
      makeJar(new Path(TEST_ROOT_DIR, "distributed.third.jar"), 3);
  Path fourth =
      makeJar(new Path(TEST_ROOT_DIR, "distributed.fourth.jar"), 4);

  Job job = Job.getInstance(conf);
  job.setMapperClass(DistributedCacheCheckerMapper.class);
  job.setReducerClass(DistributedCacheCheckerReducer.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  FileInputFormat.setInputPaths(job, first);
  // Add distributed-cache entries to the job configuration
  job.addCacheFile(
    new URI(first.toUri().toString() + "#distributed.first.symlink"));
  job.addFileToClassPath(second);
  job.addArchiveToClassPath(third);
  job.addCacheArchive(fourth.toUri());
  job.setMaxMapAttempts(1); // speed up failures

  job.submit();
  assertTrue(job.waitForCompletion(false));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: TestMRWithDistributedCache.java
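A note on the distributed-cache calls used above (the same pattern appears in Example 3 below): addFileToClassPath ships a single file and puts it on the task classpath; addArchiveToClassPath ships an archive that is unpacked on each node before being added to the classpath; addCacheFile and addCacheArchive only distribute their entries without touching the classpath, and the #distributed.first.symlink fragment creates a symlink with that name in the task's working directory.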

Example 2: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
  // configure the n-gram MapReduce job
  Configuration conf1 = new Configuration();
  conf1.set("textinputformat.record.delimiter", "."); // read a complete sentence as one record
  conf1.set("GRAM_NUMBER", args[2]);
  Job job1 = Job.getInstance(conf1);
  job1.setNumReduceTasks(3);
  job1.setJobName("NGram");
  job1.setJarByClass(Dispatcher.class);
  job1.setMapperClass(NGramBuilder.NGramMapper.class);
  job1.setReducerClass(NGramBuilder.NGramReducer.class);
  job1.setOutputKeyClass(Text.class);
  job1.setOutputValueClass(IntWritable.class);
  job1.setInputFormatClass(TextInputFormat.class); // default input format: reads lines of text files
  job1.setOutputFormatClass(TextOutputFormat.class); // default output format: key \t value
  TextInputFormat.setInputPaths(job1, new Path(args[0]));
  TextOutputFormat.setOutputPath(job1, new Path(args[1]));
  job1.waitForCompletion(true); // the language model must not start building until the n-gram library is completely built

  // configure the language-model MapReduce job
  Configuration conf2 = new Configuration();
  conf2.set("THRESHOLD", args[3]);
  conf2.set("TOP_K", args[4]);
  DBConfiguration.configureDB(conf2, "com.mysql.jdbc.Driver", "jdbc:mysql://127.0.0.1:3306/tp", "root", "123456"); // establish the connection to the MySQL database
  Job job2 = Job.getInstance(conf2);
  job2.setNumReduceTasks(3);
  job2.setJobName("LModel");
  job2.setJarByClass(Dispatcher.class);
  job2.addArchiveToClassPath(new Path("/mysql/mysql-connector-java-5.1.39-bin.jar")); // putting this jar file into jre/lib/ext is recommended
  job2.setMapperClass(LanguageModel.ModelMapper.class);
  job2.setReducerClass(LanguageModel.ModelReducer.class);
  job2.setMapOutputKeyClass(Text.class); // the Mapper emits a different key type than the Reducer
  job2.setMapOutputValueClass(Text.class); // the Mapper emits a different value type than the Reducer
  job2.setOutputKeyClass(DBOutputWritable.class);
  job2.setOutputValueClass(NullWritable.class);
  job2.setInputFormatClass(TextInputFormat.class);
  job2.setOutputFormatClass(DBOutputFormat.class);
  TextInputFormat.setInputPaths(job2, new Path(args[1]));
  DBOutputFormat.setOutput(job2, "LanguageModel", new String[] {"starter", "follower", "probability"});
  System.exit(job2.waitForCompletion(true) ? 0 : 1);
}
 
Developer ID: JianyangZhang, Project: Hot-Search-Terms, Lines of code: 42, Source: Dispatcher.java
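Note that the Path handed to addArchiveToClassPath here (and again in Example 4) is resolved against the job's default FileSystem, typically HDFS, so the MySQL connector jar must be uploaded there first (for example with hadoop fs -put); it is not read from the submitting machine's local disk. Because it is added as an archive, it is unpacked on each node and the unpacked directory ends up on the task classpath.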

Example 3: _testDistributedCache

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public void _testDistributedCache(String jobJarPath) throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
         + " not found. Not running test.");
    return;
  }

  // Create a temporary file of length 1.
  Path first = createTempFile("distributed.first", "x");
  // Create three jars, each with a single file inside.
  Path second =
      makeJar(new Path(TEST_ROOT_DIR, "distributed.second.jar"), 2);
  Path third =
      makeJar(new Path(TEST_ROOT_DIR, "distributed.third.jar"), 3);
  Path fourth =
      makeJar(new Path(TEST_ROOT_DIR, "distributed.fourth.jar"), 4);

  Job job = Job.getInstance(mrCluster.getConfig());
  
  // Set the job jar to a new "dummy" jar so we can check that it's
  // extracted properly
  job.setJar(jobJarPath);
  // Because the job jar is a "dummy" jar, we need to include the jar with
  // DistributedCacheChecker or it won't be able to find it
  Path distributedCacheCheckerJar = new Path(
          JarFinder.getJar(DistributedCacheChecker.class));
  job.addFileToClassPath(distributedCacheCheckerJar.makeQualified(
          localFs.getUri(), distributedCacheCheckerJar.getParent()));
  
  job.setMapperClass(DistributedCacheChecker.class);
  job.setOutputFormatClass(NullOutputFormat.class);

  FileInputFormat.setInputPaths(job, first);
  // Add distributed-cache entries to the job configuration
  job.addCacheFile(
      new URI(first.toUri().toString() + "#distributed.first.symlink"));
  job.addFileToClassPath(second);
  // The AppMaster jar itself
  job.addFileToClassPath(
          APP_JAR.makeQualified(localFs.getUri(), APP_JAR.getParent())); 
  job.addArchiveToClassPath(third);
  job.addCacheArchive(fourth.toUri());
  job.setMaxMapAttempts(1); // speed up failures

  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  Assert.assertTrue(job.waitForCompletion(false));
  Assert.assertTrue("Tracking URL was " + trackingUrl +
                    " but didn't Match Job ID " + jobId ,
        trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 53, Source: TestMRJobs.java

Example 4: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
  Configuration conf1 = new Configuration();
  conf1.set("textinputformat.record.delimiter", ".");
  conf1.set("noGram", args[2]);

  // first job: build the n-gram library
  Job job1 = Job.getInstance(conf1);
  job1.setJobName("NGram");
  job1.setJarByClass(Driver.class);

  job1.setMapperClass(NGramLibraryBuilder.NGramMapper.class);
  job1.setReducerClass(NGramLibraryBuilder.NGramReducer.class);

  job1.setOutputKeyClass(Text.class);
  job1.setOutputValueClass(IntWritable.class);

  job1.setInputFormatClass(TextInputFormat.class);
  job1.setOutputFormatClass(TextOutputFormat.class);

  TextInputFormat.setInputPaths(job1, new Path(args[0]));
  TextOutputFormat.setOutputPath(job1, new Path(args[1]));
  job1.waitForCompletion(true);

  // second job: build the language model
  Configuration conf2 = new Configuration();
  conf2.set("threashold", args[3]); // the misspelled key "threashold" is kept as-is; it must match the key the consuming class reads
  conf2.set("n", args[4]);
  DBConfiguration.configureDB(conf2,
      "com.mysql.jdbc.Driver",                // driver class
      "jdbc:mysql://10.101.0.163:8889/test",  // database URL
      "root",                                 // user name
      "root");                                // password

  Job job2 = Job.getInstance(conf2);
  job2.setJobName("LanguageModel");
  job2.setJarByClass(Driver.class);

  job2.addArchiveToClassPath(new Path("/mysql/mysql-connector-java-5.1.39-bin.jar"));

  job2.setMapOutputKeyClass(Text.class);
  job2.setMapOutputValueClass(Text.class);
  job2.setOutputKeyClass(Text.class);
  job2.setOutputValueClass(NullWritable.class);

  job2.setMapperClass(LanguageModel.Map.class);
  job2.setReducerClass(LanguageModel.Reduce.class);

  job2.setInputFormatClass(TextInputFormat.class);
  job2.setOutputFormatClass(DBOutputFormat.class);

  DBOutputFormat.setOutput(
      job2,
      "output",                                                      // output table name
      new String[] { "starting_phrase", "following_word", "count" } // table columns
  );

  // the input path for this job must match the first job's output path
  TextInputFormat.setInputPaths(job2, new Path(args[1]));
  System.exit(job2.waitForCompletion(true) ? 0 : 1);
}
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines of code: 62, Source: Driver.java


Note: The org.apache.hadoop.mapreduce.Job.addArchiveToClassPath examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.