

Java RunningJob.waitForCompletion Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.RunningJob.waitForCompletion. If you are unsure what RunningJob.waitForCompletion does or how to call it, the curated examples below should help; you can also explore further usage of the enclosing class, org.apache.hadoop.mapred.RunningJob.


The sections below show 8 code examples of the RunningJob.waitForCompletion method, sorted by popularity.
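
Before the collected examples, here is a minimal sketch of the submit-and-wait pattern these snippets share. It is not taken from any of the projects below; the job name and the input/output paths are placeholder assumptions, and a real job would also configure mapper, reducer, and format classes.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class WaitForCompletionSketch {
  public static void main(String[] args) throws Exception {
    JobConf jobConf = new JobConf();
    jobConf.setJobName("wait-for-completion-sketch");             // placeholder name
    FileInputFormat.setInputPaths(jobConf, new Path("input"));    // placeholder path
    FileOutputFormat.setOutputPath(jobConf, new Path("output"));  // placeholder path

    JobClient jobClient = new JobClient(jobConf);
    // submitJob() returns a RunningJob handle immediately;
    // waitForCompletion() then blocks until the job finishes.
    RunningJob runningJob = jobClient.submitJob(jobConf);
    runningJob.waitForCompletion();
    System.out.println("successful: " + runningJob.isSuccessful());
  }
}

A variant used in several examples below is JobClient.runJob(jobConf), which submits the job and blocks until it finishes in a single call; a subsequent waitForCompletion() then returns immediately.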

Example 1: mrRun

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
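  // Block until the submitted job finishes, then assert it completed successfully.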
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestMiniMRProxyUser.java

Example 2: runJob

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());

  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);

  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);

  TableMapReduceUtil.addDependencyJars(job);

  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());

  RunningJob runningJob = JobClient.runJob(job);
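  // JobClient.runJob() blocks until the job finishes, so this extra
  // waitForCompletion() call returns immediately.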
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestMultiTableSnapshotInputFormat.java

Example 3: encryptedShuffleWithCerts

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private void encryptedShuffleWithCerts(boolean useClientCerts)
  throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
      KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
                                    useClientCerts);
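    // Turn on SSL for the shuffle phase before starting the mini cluster.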
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
      new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();

    Path outputDir = new Path("output", "output");

    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestEncryptedShuffle.java

Example 4: encryptedShuffleWithCerts

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private void encryptedShuffleWithCerts(boolean useClientCerts)
  throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
      KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
                                    useClientCerts);
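    // Here the SSL flag is the JobTracker constant rather than MRConfig,
    // reflecting the older MR1-style API in this CDH release.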
    conf.setBoolean(JobTracker.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf(conf));
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
      new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();

    Path outputDir = new Path("output", "output");

    JobConf jobConf = new JobConf(getJobConf(conf));
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 37, Source: TestEncryptedShuffle.java

Example 5: start

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public void start(String[] args, int startIndex) throws IOException {
  try {
    while(startIndex < args.length) {
      String cmd = args[startIndex ++];
      if (cmd.equals(NUM_MAPPERS_KEY)) {
        numMappers = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(NUM_SUBDIRS_KEY)) {
        numSubDirs = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(NUM_FILES_SUB_DIR_KEY)) {
        numFilesEachSubDirs = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(NUM_ROUND_KEY)) {
        round = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(WORKPLACE_KEY)) {
        workplace = args[startIndex ++];
      }
    }
  } catch (Exception e) {
    printUsage();
    System.exit(-1);
  }
  
  if (workplace.trim().isEmpty()) {
    printUsage();
    System.exit(-1);
  }
  
  if (!workplace.endsWith(Path.SEPARATOR)) {
    workplace += Path.SEPARATOR;
  }
  
  JobConf jobConf = createJobConf(conf);
  
  JobClient client = new JobClient(jobConf);
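  // Submit the generator job and wait synchronously for it to finish.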
  RunningJob runningJob = client.submitJob(jobConf);
  runningJob.waitForCompletion();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 37, Source: TxnGenerator.java

Example 6: testRunningJobLocally

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
/**
 * This method creates a sample MR job and submits that JobConf object to the
 * static MiniClusterController method to be executed.
 */
@Test
public void testRunningJobLocally() throws IOException, InterruptedException {
  JobConf sampleJob = createWordCountMRJobConf();
  RunningJob runningJob = miniCluster_.runJob(sampleJob);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
}
 
Developer: cloudera, Project: RecordServiceClient, Lines: 12, Source: TestMiniClusterController.java

Example 7: testGetConfigForMiniCluster

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
/**
 * This method gets a JobConf object from the static MiniClusterController
 * method, fills it with a sample MR job and then executes the job.
 */
@Test
public void testGetConfigForMiniCluster() throws IOException {
  JobConf sampleJob = miniCluster_.getJobConf(WordCount.class);
  fillInWordCountMRJobConf(sampleJob);
  RunningJob runningJob = JobClient.runJob(sampleJob);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
}
 
Developer: cloudera, Project: RecordServiceClient, Lines: 13, Source: TestMiniClusterController.java

Example 8: start

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public void start(String[] args, int startIndex) throws IOException {
  try {
    while(startIndex < args.length) {
      String cmd = args[startIndex ++];
      if (cmd.equals(NUM_MAPPERS_KEY)) {
        numMappers = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(NUM_SUBDIRS_KEY)) {
        numSubDirs = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(NUM_FILES_SUB_DIR_KEY)) {
        numFilesEachSubDirs = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(NUM_ROUND_KEY)) {
        round = Integer.valueOf(args[startIndex ++]);
      } else if (cmd.equals(WORKPLACE_KEY)) {
        workplace = args[startIndex ++];
      } else if (cmd.equals(NOTIFIER_SERVER_ADDR_KEY)) {
        notifierServerAddrStr = args[startIndex ++];
      } else if (cmd.equals(NOTIFIER_SERVER_PORT_KEY)) {
        notifierServerPortStr = args[startIndex ++];
      } else {
        printUsage();
        System.exit(-1);
      }
    }
  } catch (Exception e) {
    printUsage();
    System.exit(-1);
  }
  
  if (workplace.trim().isEmpty() ||
      notifierServerAddrStr.trim().isEmpty()) {
    printUsage();
    System.exit(-1);
  }
  
  if (!workplace.endsWith(Path.SEPARATOR)) {
    workplace += Path.SEPARATOR;
  }
  
  JobConf jobConf = createJobConf(conf);
  
  JobClient client = new JobClient(jobConf);
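  // Same submit-and-wait pattern as the TxnGenerator example (Example 5).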
  RunningJob runningJob = client.submitJob(jobConf);
  runningJob.waitForCompletion();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 45, Source: TxnConsumer.java


Note: the org.apache.hadoop.mapred.RunningJob.waitForCompletion examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use should follow each project's License. Please do not republish without permission.