

Java IdentityReducer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.lib.IdentityReducer. If you have been wondering what exactly the IdentityReducer class does, how to use it, or where to find examples of its use, the curated code samples below may help.


The IdentityReducer class belongs to the org.apache.hadoop.mapred.lib package. Fifteen code examples of the class are shown below, sorted by popularity by default.
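
To see at a glance what the class does before reading the examples, here is a minimal, self-contained sketch of an old-API job that chains IdentityMapper and IdentityReducer. IdentityReducer forwards every (key, value) pair it receives to the output unchanged, so the job copies its input records, grouped and sorted by key. The class name, job name, and argument-based paths below are hypothetical and are not taken from any of the projects quoted in this article.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

public class IdentityPassThrough {
  public static void main(String[] args) throws IOException {
    JobConf conf = new JobConf(IdentityPassThrough.class);
    conf.setJobName("identity-pass-through"); // hypothetical job name

    // Tab-separated "key<TAB>value" text input keeps both key and value as Text.
    conf.setInputFormat(KeyValueTextInputFormat.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);

    // IdentityMapper emits each input pair as-is; IdentityReducer then
    // collects every value of a key back out unchanged, so the output
    // is the input, grouped and sorted by key.
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(IdentityReducer.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    JobClient.runJob(conf);
  }
}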

Example 1: configureWaitingJobConf

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
/**
 * Configure a waiting job
 */
static void configureWaitingJobConf(JobConf jobConf, Path inDir,
                                    Path outputPath, int numMaps, int numRed,
                                    String jobName, String mapSignalFilename,
                                    String redSignalFilename)
throws IOException {
  jobConf.setJobName(jobName);
  jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  FileInputFormat.setInputPaths(jobConf, inDir);
  FileOutputFormat.setOutputPath(jobConf, outputPath);
  jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class);
  jobConf.setReducerClass(IdentityReducer.class);
  jobConf.setOutputKeyClass(BytesWritable.class);
  jobConf.setOutputValueClass(BytesWritable.class);
  jobConf.setInputFormat(RandomInputFormat.class); // overrides the NonSplitableSequenceFileInputFormat set above
  jobConf.setNumMapTasks(numMaps);
  jobConf.setNumReduceTasks(numRed);
  jobConf.setJar("build/test/mapred/testjar/testjob.jar");
  jobConf.set(getTaskSignalParameter(true), mapSignalFilename);
  jobConf.set(getTaskSignalParameter(false), redSignalFilename);
}
 
Developer ID: naver, Project: hadoop, Line count: 25, Source: UtilsForTests.java

Example 2: runJobSucceed

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
public static RunningJob runJobSucceed(JobConf conf, Path inDir, Path outDir)
       throws IOException {
  conf.setJobName("test-job-succeed");
  conf.setMapperClass(IdentityMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  
  RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
  long sleepCount = 0;
  while (!job.isComplete()) {
    try {
      if (sleepCount > 300) { // 30 seconds
        throw new IOException("Job didn't finish in 30 seconds");
      }
      Thread.sleep(100);
      sleepCount++;
    } catch (InterruptedException e) {
      break;
    }
  }

  return job;
}
 
Developer ID: naver, Project: hadoop, Line count: 23, Source: UtilsForTests.java

Example 3: runJobFail

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
public static RunningJob runJobFail(JobConf conf, Path inDir, Path outDir)
       throws IOException {
  conf.setJobName("test-job-fail");
  conf.setMapperClass(FailMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setMaxMapAttempts(1);
  
  RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
  long sleepCount = 0;
  while (!job.isComplete()) {
    try {
      if (sleepCount > 300) { // 30 seconds
        throw new IOException("Job didn't finish in 30 seconds");
      }
      Thread.sleep(100);
      sleepCount++;
    } catch (InterruptedException e) {
      break;
    }
  }

  return job;
}
 
Developer ID: naver, Project: hadoop, Line count: 24, Source: UtilsForTests.java

Example 4: testEmptyJoin

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
public void testEmptyJoin() throws Exception {
  JobConf job = new JobConf();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
      Fake_IF.class, src));
  job.setInputFormat(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(IdentityMapper.class);
  job.setReducerClass(IdentityReducer.class);
  job.setOutputKeyClass(IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  JobClient.runJob(job);
  base.getFileSystem(job).delete(base, true);
}
 
Developer ID: naver, Project: hadoop, Line count: 18, Source: TestDatamerge.java

Example 5: checkCompression

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
private void checkCompression(boolean compressMapOutputs,
                              CompressionType redCompression,
                              boolean includeCombine
                              ) throws Exception {
  JobConf conf = new JobConf(TestMapRed.class);
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = FileSystem.get(conf);
  fs.delete(testdir, true);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setMapperClass(MyMap.class);
  conf.setReducerClass(MyReduce.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setOutputFormat(SequenceFileOutputFormat.class);
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  if (includeCombine) {
    conf.setCombinerClass(IdentityReducer.class);
  }
  conf.setCompressMapOutput(compressMapOutputs);
  SequenceFileOutputFormat.setOutputCompressionType(conf, redCompression);
  try {
    if (!fs.mkdirs(testdir)) {
      throw new IOException("Mkdirs failed to create " + testdir.toString());
    }
    if (!fs.mkdirs(inDir)) {
      throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    Path inFile = new Path(inDir, "part0");
    DataOutputStream f = fs.create(inFile);
    f.writeBytes("Owen was here\n");
    f.writeBytes("Hadoop is fun\n");
    f.writeBytes("Is this done, yet?\n");
    f.close();
    RunningJob rj = JobClient.runJob(conf);
    assertTrue("job was complete", rj.isComplete());
    assertTrue("job was successful", rj.isSuccessful());
    Path output = new Path(outDir,
                           Task.getOutputName(0));
    assertTrue("reduce output exists " + output, fs.exists(output));
    SequenceFile.Reader rdr = 
      new SequenceFile.Reader(fs, output, conf);
    assertEquals("is reduce output compressed " + output, 
                 redCompression != CompressionType.NONE, 
                 rdr.isCompressed());
    rdr.close();
  } finally {
    fs.delete(testdir, true);
  }
}
 
Developer ID: naver, Project: hadoop, Line count: 53, Source: TestMapRed.java

Example 6: launchJob

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
static RunningJob launchJob(JobConf jobConf, Path inDir, Path outputPath,
    int numMaps, String jobName) throws IOException {
  jobConf.setJobName(jobName);
  jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  FileInputFormat.setInputPaths(jobConf, inDir);
  FileOutputFormat.setOutputPath(jobConf, outputPath);
  jobConf.setMapperClass(IdentityMapper.class);
  jobConf.setReducerClass(IdentityReducer.class);
  jobConf.setOutputKeyClass(BytesWritable.class);
  jobConf.setOutputValueClass(BytesWritable.class);
  jobConf.setNumMapTasks(numMaps);
  jobConf.setNumReduceTasks(0);
  jobConf.setJar("build/test/mapred/testjar/testjob.jar");
  return JobClient.runJob(jobConf);
}
 
Developer ID: naver, Project: hadoop, Line count: 17, Source: TestMultipleLevelCaching.java

Example 7: run

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
@Override
public void run(String[] args) throws Exception {
  Flags flags = new Flags();
  flags.add("input");
  flags.add("output");
  flags.add("prob");
  flags.parseAndCheck(args);

  prob = flags.getDouble("prob");

  JobConf job = new JobConf(this.getClass());
  job.set("prob", flags.getString("prob"));
  MapReduceHelper.runTextSeqFileMapReduce(
      job,
      SamplingMapper.class, IdentityReducer.class,
      flags.getString("input"), flags.getString("output"));
}
 
Developer ID: thunlp, Project: THUTag, Line count: 18, Source: Sample.java

Example 8: testTaskTempDir

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
/**
 * Tests task's temp directory.
 * 
 * In this test, we give different values to mapreduce.task.tmp.dir,
 * both relative and absolute, and check whether the temp directory 
 * is created. We also check whether the java.io.tmpdir value is the same 
 * as the directory specified. We create a temp file and check if it is 
 * created in the directory specified.
 */
@Test
public void testTaskTempDir(){
  try {
    JobConf conf = new JobConf(mr.getConfig());
    
    // initialize input, output directories
    Path inDir = new Path("testing/wc/input");
    Path outDir = new Path("testing/wc/output");
    String input = "The input";
    configure(conf, inDir, outDir, input, 
        MapClass.class, IdentityReducer.class);
    launchTest(conf, inDir, outDir, input);
    
  } catch(Exception e) {
    e.printStackTrace();
    fail("Exception in testing temp dir");
    tearDown();
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Line count: 29, Source: TestMiniMRChildTask.java

Example 9: configureWaitingJobConf

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
/**
 * Configure a waiting job
 */
static void configureWaitingJobConf(JobConf jobConf, Path inDir,
                                    Path outputPath, int numMaps, int numRed,
                                    String jobName, String mapSignalFilename,
                                    String redSignalFilename)
throws IOException {
  jobConf.setJobName(jobName);
  jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  FileInputFormat.setInputPaths(jobConf, inDir);
  FileOutputFormat.setOutputPath(jobConf, outputPath);
  jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class);
  jobConf.setReducerClass(IdentityReducer.class);
  jobConf.setOutputKeyClass(BytesWritable.class);
  jobConf.setOutputValueClass(BytesWritable.class);
  jobConf.setInputFormat(RandomInputFormat.class); // overrides the NonSplitableSequenceFileInputFormat set above
  jobConf.setNumMapTasks(numMaps);
  jobConf.setNumReduceTasks(numRed);
  jobConf.setJar("build/test/testjar/testjob.jar");
  jobConf.set(getTaskSignalParameter(true), mapSignalFilename);
  jobConf.set(getTaskSignalParameter(false), redSignalFilename);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Line count: 25, Source: UtilsForTests.java

Example 10: runJobSucceed

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
static RunningJob runJobSucceed(JobConf conf, Path inDir, Path outDir)
       throws IOException {
  conf.setJobName("test-job-succeed");
  conf.setMapperClass(IdentityMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  
  RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
  while (!job.isComplete()) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      break;
    }
  }

  return job;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Line count: 18, Source: UtilsForTests.java

Example 11: runJobFail

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
static RunningJob runJobFail(JobConf conf, Path inDir, Path outDir)
       throws IOException {
  conf.setJobName("test-job-fail");
  conf.setMapperClass(FailMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  
  RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
  while (!job.isComplete()) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      break;
    }
  }

  return job;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Line count: 18, Source: UtilsForTests.java

Example 12: runJobKill

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
static RunningJob runJobKill(JobConf conf,  Path inDir, Path outDir)
       throws IOException {

  conf.setJobName("test-job-kill");
  conf.setMapperClass(KillMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  
  RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
  while (job.getJobState() != JobStatus.RUNNING) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      break;
    }
  }
  job.killJob();
  while (job.cleanupProgress() == 0.0f) {
    try {
      Thread.sleep(10);
    } catch (InterruptedException ie) {
      break;
    }
  }

  return job;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Line count: 27, Source: UtilsForTests.java

Example 13: launchTest

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
/**
 * Launch tests 
 * @param conf Configuration of the mapreduce job.
 * @param inDir input path
 * @param outDir output path
 * @param input Input text
 * @throws IOException
 */
public void launchTest(JobConf conf,
                       Path inDir,
                       Path outDir,
                       String input)
throws IOException {
  configure(conf, inDir, outDir, input, 
            MapClass.class, IdentityReducer.class);

  FileSystem outFs = outDir.getFileSystem(conf);
  
  // Launch job with default option for temp dir. 
  // i.e. temp dir is ./tmp 
  JobClient.runJob(conf);
  outFs.delete(outDir, true);

  // Launch job by giving relative path to temp dir.
  conf.set("mapred.child.tmp", "../temp");
  JobClient.runJob(conf);
  outFs.delete(outDir, true);

  // Launch job by giving absolute path to temp dir
  conf.set("mapred.child.tmp", "/tmp");
  JobClient.runJob(conf);
  outFs.delete(outDir, true);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Line count: 34, Source: TestMiniMRChildTask.java

Example 14: testEmptyJoin

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
@Test
public void testEmptyJoin() throws Exception {
  JobConf job = new JobConf();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
      Fake_IF.class, src));
  job.setInputFormat(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(IdentityMapper.class);
  job.setReducerClass(IdentityReducer.class);
  job.setOutputKeyClass(IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  JobClient.runJob(job);
  base.getFileSystem(job).delete(base, true);
}
 
Developer ID: hopshadoop, Project: hops, Line count: 19, Source: TestDatamerge.java

Example 15: runTestTaskEnv

import org.apache.hadoop.mapred.lib.IdentityReducer; // import the required package/class
void runTestTaskEnv(JobConf conf, Path inDir, Path outDir) throws IOException {
  String input = "The input";
  configure(conf, inDir, outDir, input, EnvCheckMapper.class, 
      IdentityReducer.class);
  // test:
  //  - setting a brand-new variable (MY_PATH)
  //  - overriding an existing variable (HOME)
  //  - appending to an existing var from the modified env (LD_LIBRARY_PATH)
  //  - appending to an existing var from the tasktracker's env (PATH)
  //  - appending to a new var (NEW_PATH)
  conf.set("mapred.child.env", 
           "MY_PATH=/tmp,HOME=/tmp,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp,"
           + "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp");
  conf.set("path", System.getenv("PATH"));
  RunningJob job = JobClient.runJob(conf);
  assertTrue("The environment checker job failed.", job.isSuccessful());
}
 
Developer ID: rhli, Project: hadoop-EAR, Line count: 18, Source: TestMiniMRChildTask.java


Note: The org.apache.hadoop.mapred.lib.IdentityReducer class examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.