Java Job.getCounters Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.getCounters. If you are wondering how to call Job.getCounters, or what real-world uses of it look like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.mapreduce.Job.


The following presents 15 code examples of Job.getCounters, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code samples.
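
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: run a Job, call getCounters() after completion, and read individual counters. The job name and the commented-out configuration are placeholders; the Job, Counters, and TaskCounter calls are standard Hadoop APIs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

public class GetCountersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "counter-sketch"); // hypothetical job name
    // ... set mapper, reducer, input and output paths here ...

    boolean success = job.waitForCompletion(true);

    // getCounters() may return null once a job has been retired from the
    // history server, so guard before dereferencing (see the "runJob"
    // examples below).
    Counters counters = job.getCounters();
    if (success && counters != null) {
      long mapInputRecords =
          counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
      System.out.println("Map input records: " + mapInputRecords);
    }
  }
}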

Example 1: JobMetrics

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public JobMetrics(Job job, String bytesReplicatedKey) {
  Builder<String, Long> builder = ImmutableMap.builder();
  if (job != null) {
    Counters counters;
    try {
      counters = job.getCounters();
    } catch (IOException e) {
      throw new CircusTrainException("Unable to get counters from job.", e);
    }
    if (counters != null) {
      for (CounterGroup group : counters) {
        for (Counter counter : group) {
          builder.put(DotJoiner.join(group.getName(), counter.getName()), counter.getValue());
        }
      }
    }
  }
  metrics = builder.build();
  Long bytesReplicatedValue = metrics.get(bytesReplicatedKey);
  if (bytesReplicatedValue != null) {
    bytesReplicated = bytesReplicatedValue;
  } else {
    bytesReplicated = 0L;
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 26, Source: JobMetrics.java
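
The constructor above flattens every Hadoop counter into an immutable map keyed by the dot-joined group and counter names. A minimal usage sketch follows; the getMetrics() and getBytesReplicated() accessors and the counter key are assumptions for illustration, not necessarily the actual circus-train API.

import org.apache.hadoop.mapreduce.Job;

// Hypothetical accessors -- assumed for illustration, not taken from the
// circus-train source.
public class JobMetricsUsageSketch {
  static void printMetrics(Job job) {
    JobMetrics metrics = new JobMetrics(job,
        "org.apache.hadoop.mapreduce.FileSystemCounter.BYTES_WRITTEN");

    // Every counter is addressable as "<groupName>.<counterName>".
    Long mapInputRecords = metrics.getMetrics()
        .get("org.apache.hadoop.mapreduce.TaskCounter.MAP_INPUT_RECORDS");
    System.out.println("bytes replicated: " + metrics.getBytesReplicated()
        + ", map input records: " + mapInputRecords);
  }
}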

Example 2: runJob

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);
  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
      .findCounter("HDFS_BYTES_READ").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords =  ConfigurationHelper.getNumMapInputRecords(job);
    LOG.info("Exported " + numRecords + " records.");
  }

  return success;
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 25, Source: ExportJobBase.java

Example 3: runJob

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);
  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
        .findCounter("HDFS_BYTES_READ").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords =  ConfigurationHelper.getNumMapInputRecords(job);
    LOG.info("Exported " + numRecords + " records.");
  }

  return success;
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 25, Source: HdfsOdpsImportJob.java

Example 4: verifySleepJobCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
protected void verifySleepJobCounters(Job job) throws InterruptedException,
    IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert.assertEquals(numSleepReducers,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: TestMRJobs.java

Example 5: call

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
public TimingResult call() throws Exception {
  PerformanceEvaluation.TestOptions opts = PerformanceEvaluation.parseOpts(argv);
  PerformanceEvaluation.checkTable(admin, opts);
  PerformanceEvaluation.RunResult[] results = null;
  long numRows = opts.totalRows;
  long elapsedTime = 0;
  if (opts.nomapred) {
    results = PerformanceEvaluation.doLocalClients(opts, admin.getConfiguration());
    for (PerformanceEvaluation.RunResult r : results) {
      elapsedTime = Math.max(elapsedTime, r.duration);
    }
  } else {
    Job job = PerformanceEvaluation.doMapReduce(opts, admin.getConfiguration());
    Counters counters = job.getCounters();
    numRows = counters.findCounter(PerformanceEvaluation.Counter.ROWS).getValue();
    elapsedTime = counters.findCounter(PerformanceEvaluation.Counter.ELAPSED_TIME).getValue();
  }
  return new TimingResult(numRows, elapsedTime, results);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: IntegrationTestRegionReplicaPerf.java

Example 6: runJob

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/**
 * Actually run the MapReduce job.
 */
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);

  if (isHCatJob) {
    SqoopHCatUtilities.instance().invokeOutputCommitterForLocalMode(job);
  }

  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
      .findCounter("HDFS_BYTES_WRITTEN").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords = ConfigurationHelper.getNumMapOutputRecords(job);
    LOG.info("Retrieved " + numRecords + " records.");
  }
  return success;
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 32, Source: ImportJobBase.java

Example 7: verifyRandomWriterCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
protected void verifyRandomWriterCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TestMRJobs.java

Example 8: verifyFailingMapperCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(2, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
      .getValue());
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: TestMRJobs.java

Example 9: verifySleepJobCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected void verifySleepJobCounters(Job job) throws InterruptedException,
    IOException {
  Counters counters = job.getCounters();
  super.verifySleepJobCounters(job);
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.NUM_UBER_SUBMAPS).getValue());
  Assert.assertEquals(numSleepReducers,
      counters.findCounter(JobCounter.NUM_UBER_SUBREDUCES).getValue());
  Assert.assertEquals(3 + numSleepReducers,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestUberAM.java

Example 10: verifyRandomWriterCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected void verifyRandomWriterCounters(Job job)
    throws InterruptedException, IOException {
  super.verifyRandomWriterCounters(job);
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: TestUberAM.java

Example 11: verifyFailingMapperCounters

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  super.verifyFailingMapperCounters(job);
  Assert.assertEquals(2,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(2, counters
      .findCounter(JobCounter.NUM_FAILED_UBERTASKS).getValue());
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestUberAM.java

Example 12: run

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Override
public int run(String[] args) throws Exception {
  String[] otherArgs = new GenericOptionsParser(getConf(), args).getRemainingArgs();
  if (!doCommandLine(otherArgs)) {
    return 1;
  }

  Job job = createSubmittableJob(otherArgs);
  if (!job.waitForCompletion(true)) {
    LOG.info("Map-reduce job failed!");
    return 1;
  }
  counters = job.getCounters();
  return 0;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 16, Source: SyncTable.java

Example 13: testJobHistoryData

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Test (timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException,
    AvroRemoteException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(mrCluster.getConfig());
  // Job with 3 maps and 2 reduces
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.waitForCompletion(true);
  Counters counterMR = job.getCounters();
  JobId jobId = TypeConverter.toYarn(job.getJobID());
  ApplicationId appID = jobId.getAppId();
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;

    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
        .getState())) {
      break;
    }

    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
    .getRMContext().getRMApps().get(appID).getState());
  Counters counterHS = job.getCounters();
  // TODO: the assert below worked; need to check whether we should compare
  // each field or convert to V2 counters and compare.
  LOG.info("CounterHS " + counterHS);
  LOG.info("CounterMR " + counterMR);
  Assert.assertEquals(counterHS, counterMR);
  
  HSClientProtocol historyClient = instantiateHistoryProxy();
  GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
  gjReq.setJobId(jobId);
  JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
  verifyJobReport(jobReport, jobId);
}
 
Developer ID: naver, Project: hadoop, Lines: 53, Source: TestMRJobsWithHistoryService.java

Example 14: testSpeculativeExecution

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
@Test
public void testSpeculativeExecution() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
         + " not found. Not running test.");
    return;
  }

  /*------------------------------------------------------------------
   * Test that Map/Red does not speculate if MAP_SPECULATIVE and 
   * REDUCE_SPECULATIVE are both false.
   * -----------------------------------------------------------------
   */
  Job job = runSpecTest(false, false);

  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Counters counters = job.getCounters();
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
          .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
          .getValue());
  Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
          .getValue());

  /*----------------------------------------------------------------------
   * Test that Mapper speculates if MAP_SPECULATIVE is true and
   * REDUCE_SPECULATIVE is false.
   * ---------------------------------------------------------------------
   */
  job = runSpecTest(true, false);

  succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  counters = job.getCounters();

  // The long-running map will be killed and a new one started.
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
          .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
          .getValue());
  Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
          .getValue());
  Assert.assertEquals(1, counters.findCounter(JobCounter.NUM_KILLED_MAPS)
      .getValue());

  /*----------------------------------------------------------------------
   * Test that Reducer speculates if REDUCE_SPECULATIVE is true and
   * MAP_SPECULATIVE is false.
   * ---------------------------------------------------------------------
   */
  job = runSpecTest(false, true);

  succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  counters = job.getCounters();

  // The long-running reduce will be killed and a new one started.
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
          .getValue());
  Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
          .getValue());
}
 
Developer ID: naver, Project: hadoop, Lines: 67, Source: TestSpeculativeExecution.java

Example 15: testScanMapReduce

import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public void testScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
  Stopwatch scanTimer = new Stopwatch();

  Scan scan = getScan();

  String jobName = "testScanMapReduce";

  Job job = new Job(conf);
  job.setJobName(jobName);

  job.setJarByClass(getClass());

  TableMapReduceUtil.initTableMapperJob(
      this.tablename,
      scan,
      MyMapper.class,
      NullWritable.class,
      NullWritable.class,
      job
  );

  job.setNumReduceTasks(0);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);
  job.setOutputFormatClass(NullOutputFormat.class);

  scanTimer.start();
  job.waitForCompletion(true);
  scanTimer.stop();

  Counters counters = job.getCounters();
  long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
  long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();

  long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();
  double throughput = (double)totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
  double throughputRows = (double)numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
  double throughputCells = (double)numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

  System.out.println("HBase scan mapreduce: ");
  System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
  System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

  System.out.println("total bytes: " + totalBytes + " bytes ("
      + StringUtils.humanReadableInt(totalBytes) + ")");
  System.out.println("throughput  : " + StringUtils.humanReadableInt((long)throughput) + "B/s");
  System.out.println("total rows  : " + numRows);
  System.out.println("throughput  : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s");
  System.out.println("total cells : " + numCells);
  System.out.println("throughput  : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s");
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 53, Source: ScanPerformanceEvaluation.java


Note: The org.apache.hadoop.mapreduce.Job.getCounters examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Do not republish without permission.