

Java Job.getCounters Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.getCounters. If you are wondering what Job.getCounters does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.mapreduce.Job.


The sections below present 15 code examples of the Job.getCounters method, ordered by popularity by default.
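For orientation before the project-specific examples, here is a minimal sketch of the typical call pattern. It assumes a configured and completed MapReduce job; the class name, job name, and elided setup are illustrative placeholders rather than code from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;

public class CountersDemo {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "counters-demo"); // hypothetical job name
    // ... set mapper, reducer, input and output formats here ...
    job.waitForCompletion(true);

    Counters counters = job.getCounters(); // may be null once the job has been retired
    if (counters != null) {
      for (CounterGroup group : counters) {
        for (Counter counter : group) {
          System.out.println(group.getName() + "." + counter.getName()
              + " = " + counter.getValue());
        }
      }
    }
  }
}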

Example 1: JobMetrics

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public JobMetrics(Job job, String bytesReplicatedKey) {
  Builder<String, Long> builder = ImmutableMap.builder();
  if (job != null) {
    Counters counters;
    try {
      counters = job.getCounters();
    } catch (IOException e) {
      throw new CircusTrainException("Unable to get counters from job.", e);
    }
    if (counters != null) {
      for (CounterGroup group : counters) {
        for (Counter counter : group) {
          builder.put(DotJoiner.join(group.getName(), counter.getName()), counter.getValue());
        }
      }
    }
  }
  metrics = builder.build();
  Long bytesReplicatedValue = metrics.get(bytesReplicatedKey);
  if (bytesReplicatedValue != null) {
    bytesReplicated = bytesReplicatedValue;
  } else {
    bytesReplicated = 0L;
  }
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 26, Source: JobMetrics.java
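Note the defensive handling in this constructor: the checked IOException from getCounters() is converted into the project's CircusTrainException, and the result is null-checked before iterating, since (as the comments in later examples note) counters can be unavailable once a job has been retired.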

Example 2: runJob

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);
  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
      .findCounter("HDFS_BYTES_READ").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords =  ConfigurationHelper.getNumMapInputRecords(job);
    LOG.info("Exported " + numRecords + " records.");
  }

  return success;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 25, Source: ExportJobBase.java

Example 3: runJob

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);
  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
        .findCounter("HDFS_BYTES_READ").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords =  ConfigurationHelper.getNumMapInputRecords(job);
    LOG.info("Exported " + numRecords + " records.");
  }

  return success;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 25, Source: HdfsOdpsImportJob.java

Example 4: verifySleepJobCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
protected void verifySleepJobCounters(Job job) throws InterruptedException,
    IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert.assertEquals(numSleepReducers,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue() != 0);
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestMRJobs.java

Example 5: call

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
public TimingResult call() throws Exception {
  PerformanceEvaluation.TestOptions opts = PerformanceEvaluation.parseOpts(argv);
  PerformanceEvaluation.checkTable(admin, opts);
  PerformanceEvaluation.RunResult results[] = null;
  long numRows = opts.totalRows;
  long elapsedTime = 0;
  if (opts.nomapred) {
    results = PerformanceEvaluation.doLocalClients(opts, admin.getConfiguration());
    for (PerformanceEvaluation.RunResult r : results) {
      elapsedTime = Math.max(elapsedTime, r.duration);
    }
  } else {
    Job job = PerformanceEvaluation.doMapReduce(opts, admin.getConfiguration());
    Counters counters = job.getCounters();
    numRows = counters.findCounter(PerformanceEvaluation.Counter.ROWS).getValue();
    elapsedTime = counters.findCounter(PerformanceEvaluation.Counter.ELAPSED_TIME).getValue();
  }
  return new TimingResult(numRows, elapsedTime, results);
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: IntegrationTestRegionReplicaPerf.java

Example 6: runJob

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/**
 * Actually run the MapReduce job.
 */
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);

  if (isHCatJob) {
    SqoopHCatUtilities.instance().invokeOutputCommitterForLocalMode(job);
  }

  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
      .findCounter("HDFS_BYTES_WRITTEN").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords = ConfigurationHelper.getNumMapOutputRecords(job);
    LOG.info("Retrieved " + numRecords + " records.");
  }
  return success;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 32, Source: ImportJobBase.java
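Read alongside Examples 2 and 3, this shows how the direction of transfer picks the counter: the export jobs total HDFS_BYTES_READ and report map input records, while this import job totals HDFS_BYTES_WRITTEN and reports map output records.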

Example 7: verifyRandomWriterCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
protected void verifyRandomWriterCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: TestMRJobs.java

Example 8: verifyFailingMapperCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(2, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
      .getValue());
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestMRJobs.java

Example 9: verifySleepJobCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected void verifySleepJobCounters(Job job) throws InterruptedException,
    IOException {
  Counters counters = job.getCounters();
  super.verifySleepJobCounters(job);
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.NUM_UBER_SUBMAPS).getValue());
  Assert.assertEquals(numSleepReducers,
      counters.findCounter(JobCounter.NUM_UBER_SUBREDUCES).getValue());
  Assert.assertEquals(3 + numSleepReducers,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestUberAM.java

Example 10: verifyRandomWriterCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected void verifyRandomWriterCounters(Job job)
    throws InterruptedException, IOException {
  super.verifyRandomWriterCounters(job);
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestUberAM.java

Example 11: verifyFailingMapperCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  super.verifyFailingMapperCounters(job);
  Assert.assertEquals(2,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(2, counters
      .findCounter(JobCounter.NUM_FAILED_UBERTASKS).getValue());
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestUberAM.java

Example 12: run

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
public int run(String[] args) throws Exception {
  String[] otherArgs = new GenericOptionsParser(getConf(), args).getRemainingArgs();
  if (!doCommandLine(otherArgs)) {
    return 1;
  }

  Job job = createSubmittableJob(otherArgs);
  if (!job.waitForCompletion(true)) {
    LOG.info("Map-reduce job failed!");
    return 1;
  }
  counters = job.getCounters();
  return 0;
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: SyncTable.java

Example 13: testJobHistoryData

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Test (timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException,
    AvroRemoteException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(mrCluster.getConfig());
  // Job with 3 maps and 2 reduces
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.waitForCompletion(true);
  Counters counterMR = job.getCounters();
  JobId jobId = TypeConverter.toYarn(job.getJobID());
  ApplicationId appID = jobId.getAppId();
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;

    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
        .getState())) {
      break;
    }

    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
    .getRMContext().getRMApps().get(appID).getState());
  Counters counterHS = job.getCounters();
  //TODO the Assert below worked. need to check
  //Should we compare each field or convert to V2 counter and compare
  LOG.info("CounterHS " + counterHS);
  LOG.info("CounterMR " + counterMR);
  Assert.assertEquals(counterHS, counterMR);
  
  HSClientProtocol historyClient = instantiateHistoryProxy();
  GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
  gjReq.setJobId(jobId);
  JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
  verifyJobReport(jobReport, jobId);
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestMRJobsWithHistoryService.java
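This test reads the counters twice: counterMR is taken immediately after completion, while counterHS is taken only after the application reaches a terminal RM state, by which point the request can be served from the job history side. Asserting the two snapshots equal verifies that the history data matches what the job reported while running.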

Example 14: testSpeculativeExecution

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Test
public void testSpeculativeExecution() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
         + " not found. Not running test.");
    return;
  }

  /*------------------------------------------------------------------
   * Test that Map/Red does not speculate if MAP_SPECULATIVE and 
   * REDUCE_SPECULATIVE are both false.
   * -----------------------------------------------------------------
   */
  Job job = runSpecTest(false, false);

  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Counters counters = job.getCounters();
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
          .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
          .getValue());
  Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
          .getValue());

  /*----------------------------------------------------------------------
   * Test that Mapper speculates if MAP_SPECULATIVE is true and
   * REDUCE_SPECULATIVE is false.
   * ---------------------------------------------------------------------
   */
  job = runSpecTest(true, false);

  succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  counters = job.getCounters();

  // The long-running map will be killed and a new one started.
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
          .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
          .getValue());
  Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
          .getValue());
  Assert.assertEquals(1, counters.findCounter(JobCounter.NUM_KILLED_MAPS)
      .getValue());

  /*----------------------------------------------------------------------
   * Test that Reducer speculates if REDUCE_SPECULATIVE is true and
   * MAP_SPECULATIVE is false.
   * ---------------------------------------------------------------------
   */
  job = runSpecTest(false, true);

  succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  counters = job.getCounters();

  // The long-running map will be killed and a new one started.
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
          .getValue());
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
          .getValue());
}
 
Developer: naver, Project: hadoop, Lines: 67, Source: TestSpeculativeExecution.java

Example 15: testScanMapReduce

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public void testScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
  Stopwatch scanOpenTimer = new Stopwatch();
  Stopwatch scanTimer = new Stopwatch();

  Scan scan = getScan();

  String jobName = "testScanMapReduce";

  Job job = new Job(conf);
  job.setJobName(jobName);

  job.setJarByClass(getClass());

  TableMapReduceUtil.initTableMapperJob(
      this.tablename,
      scan,
      MyMapper.class,
      NullWritable.class,
      NullWritable.class,
      job
  );

  job.setNumReduceTasks(0);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);
  job.setOutputFormatClass(NullOutputFormat.class);

  scanTimer.start();
  job.waitForCompletion(true);
  scanTimer.stop();

  Counters counters = job.getCounters();
  long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
  long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();

  long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();
  double throughput = (double)totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
  double throughputRows = (double)numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
  double throughputCells = (double)numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

  System.out.println("HBase scan mapreduce: ");
  System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
  System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

  System.out.println("total bytes: " + totalBytes + " bytes ("
      + StringUtils.humanReadableInt(totalBytes) + ")");
  System.out.println("throughput  : " + StringUtils.humanReadableInt((long)throughput) + "B/s");
  System.out.println("total rows  : " + numRows);
  System.out.println("throughput  : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s");
  System.out.println("total cells : " + numCells);
  System.out.println("throughput  : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s");
}
 
Developer: fengchen8086, Project: ditb, Lines: 53, Source: ScanPerformanceEvaluation.java
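All of the throughput figures here are derived from counters: NUM_ROWS and NUM_CELLS come from the job's custom ScanCounter values, BYTES_IN_RESULTS from the HBase counter group, and each total is divided by the Stopwatch's elapsed scan time.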


Note: The org.apache.hadoop.mapreduce.Job.getCounters examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. For distribution and use, refer to each project's license; do not reproduce without permission.