

Java Values.REDUCE Property Code Examples

This article collects typical usage examples of the Java property org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values.REDUCE. If you are unsure what Values.REDUCE does, how to use it, or want to see it in context, the curated examples below should help. You can also explore other usages of the enclosing type, org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values.


Three code examples of the Values.REDUCE property are shown below, sorted by popularity by default.
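Before the examples, here is a minimal sketch of the common pattern they all share: reading a task's type from a Rumen LoggedTask and comparing it against the Values constants. The helper name isReduceTask is ours, not from any of the projects below; getTaskType() and the MAP/REDUCE/SUCCESS constants are the ones actually used in the examples.

import org.apache.hadoop.tools.rumen.LoggedTask;
import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;

// Hypothetical helper illustrating the usual check; not taken from the
// projects below.
static boolean isReduceTask(LoggedTask task) {
  // getTaskType() returns a Pre21JobHistoryConstants.Values constant;
  // the examples below only accept MAP or REDUCE at this point.
  return task.getTaskType() == Values.REDUCE;
}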

Example 1: getTaskInfo

private TaskInfo getTaskInfo(LoggedTask loggedTask) {
  if (loggedTask == null) {
    return new TaskInfo(0, 0, 0, 0, 0);
  }
  List<LoggedTaskAttempt> attempts = loggedTask.getAttempts();

  long inputBytes = -1;
  long inputRecords = -1;
  long outputBytes = -1;
  long outputRecords = -1;
  long heapMegabytes = -1;
  ResourceUsageMetrics metrics = new ResourceUsageMetrics();

  Values type = loggedTask.getTaskType();
  if ((type != Values.MAP) && (type != Values.REDUCE)) {
    throw new IllegalArgumentException(
        "getTaskInfo only supports MAP or REDUCE tasks: " + type.toString()
            + " for task = " + loggedTask.getTaskID());
  }

  for (LoggedTaskAttempt attempt : attempts) {
    attempt = sanitizeLoggedTaskAttempt(attempt);
    // ignore bad attempts or unsuccessful attempts.
    if ((attempt == null) || (attempt.getResult() != Values.SUCCESS)) {
      continue;
    }

    if (type == Values.MAP) {
      inputBytes = attempt.getHdfsBytesRead();
      inputRecords = attempt.getMapInputRecords();
      outputBytes =
          (job.getTotalReduces() > 0) ? attempt.getMapOutputBytes() : attempt
              .getHdfsBytesWritten();
      outputRecords = attempt.getMapOutputRecords();
      heapMegabytes =
          (job.getJobMapMB() > 0) ? job.getJobMapMB() : job
              .getHeapMegabytes();
    } else {
      inputBytes = attempt.getReduceShuffleBytes();
      inputRecords = attempt.getReduceInputRecords();
      outputBytes = attempt.getHdfsBytesWritten();
      outputRecords = attempt.getReduceOutputRecords();
      heapMegabytes =
          (job.getJobReduceMB() > 0) ? job.getJobReduceMB() : job
              .getHeapMegabytes();
    }
    // set the resource usage metrics
    metrics = attempt.getResourceUsageMetrics();
    break;
  }

  TaskInfo taskInfo =
      new TaskInfo(inputBytes, (int) inputRecords, outputBytes,
          (int) outputRecords, (int) heapMegabytes,
          metrics);
  return taskInfo;
}
 
Developer: naver, Project: hadoop, Lines: 57, Source: ZombieJob.java
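Two fallbacks in this example are worth noting. For a map task's output size, the (job.getTotalReduces() > 0) test distinguishes regular jobs, where map output is shuffled to reducers and measured by getMapOutputBytes(), from map-only jobs, where maps write straight to HDFS and getHdfsBytesWritten() is the meaningful number. Similarly, the heap size falls back from the per-phase setting (getJobMapMB() or getJobReduceMB()) to the job-wide getHeapMegabytes() when the former is not set, i.e. reported as a non-positive value.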

Example 2: getTaskInfo

private TaskInfo getTaskInfo(LoggedTask loggedTask) {
  List<LoggedTaskAttempt> attempts = loggedTask.getAttempts();

  long inputBytes = -1;
  long inputRecords = -1;
  long outputBytes = -1;
  long outputRecords = -1;
  long heapMegabytes = -1;

  Values type = loggedTask.getTaskType();
  if ((type != Values.MAP) && (type != Values.REDUCE)) {
    throw new IllegalArgumentException(
        "getTaskInfo only supports MAP or REDUCE tasks: " + type.toString()
            + " for task = " + loggedTask.getTaskID());
  }

  for (LoggedTaskAttempt attempt : attempts) {
    attempt = sanitizeLoggedTaskAttempt(attempt);
    // ignore bad attempts or unsuccessful attempts.
    if ((attempt == null) || (attempt.getResult() != Values.SUCCESS)) {
      continue;
    }

    if (type == Values.MAP) {
      inputBytes = attempt.getHdfsBytesRead();
      inputRecords = attempt.getMapInputRecords();
      outputBytes =
          (job.getTotalReduces() > 0) ? attempt.getMapOutputBytes() : attempt
              .getHdfsBytesWritten();
      outputRecords = attempt.getMapOutputRecords();
      heapMegabytes =
          (job.getJobMapMB() > 0) ? job.getJobMapMB() : job
              .getHeapMegabytes();
    } else {
      inputBytes = attempt.getReduceShuffleBytes();
      inputRecords = attempt.getReduceInputRecords();
      outputBytes = attempt.getHdfsBytesWritten();
      outputRecords = attempt.getReduceOutputRecords();
      heapMegabytes =
          (job.getJobReduceMB() > 0) ? job.getJobReduceMB() : job
              .getHeapMegabytes();
    }
    break;
  }

  TaskInfo taskInfo =
      new TaskInfo(inputBytes, (int) inputRecords, outputBytes,
          (int) outputRecords, (int) heapMegabytes);
  return taskInfo;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 50, Source: ZombieJob.java
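Unlike examples 1 and 3, this variant does not collect ResourceUsageMetrics from the successful attempt and therefore builds the TaskInfo with the five-argument constructor.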

Example 3: getTaskInfo

private TaskInfo getTaskInfo(LoggedTask loggedTask) {
  List<LoggedTaskAttempt> attempts = loggedTask.getAttempts();

  long inputBytes = -1;
  long inputRecords = -1;
  long outputBytes = -1;
  long outputRecords = -1;
  long heapMegabytes = -1;
  ResourceUsageMetrics metrics = new ResourceUsageMetrics();

  Values type = loggedTask.getTaskType();
  if ((type != Values.MAP) && (type != Values.REDUCE)) {
    throw new IllegalArgumentException(
        "getTaskInfo only supports MAP or REDUCE tasks: " + type.toString()
            + " for task = " + loggedTask.getTaskID());
  }

  for (LoggedTaskAttempt attempt : attempts) {
    attempt = sanitizeLoggedTaskAttempt(attempt);
    // ignore bad attempts or unsuccessful attempts.
    if ((attempt == null) || (attempt.getResult() != Values.SUCCESS)) {
      continue;
    }

    if (type == Values.MAP) {
      inputBytes = attempt.getHdfsBytesRead();
      inputRecords = attempt.getMapInputRecords();
      outputBytes =
          (job.getTotalReduces() > 0) ? attempt.getMapOutputBytes() : attempt
              .getHdfsBytesWritten();
      outputRecords = attempt.getMapOutputRecords();
      heapMegabytes =
          (job.getJobMapMB() > 0) ? job.getJobMapMB() : job
              .getHeapMegabytes();
    } else {
      inputBytes = attempt.getReduceShuffleBytes();
      inputRecords = attempt.getReduceInputRecords();
      outputBytes = attempt.getHdfsBytesWritten();
      outputRecords = attempt.getReduceOutputRecords();
      heapMegabytes =
          (job.getJobReduceMB() > 0) ? job.getJobReduceMB() : job
              .getHeapMegabytes();
    }
    // set the resource usage metrics
    metrics = attempt.getResourceUsageMetrics();
    break;
  }

  TaskInfo taskInfo =
      new TaskInfo(inputBytes, (int) inputRecords, outputBytes,
          (int) outputRecords, (int) heapMegabytes,
          metrics);
  return taskInfo;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 54, Source: ZombieJob.java


Note: The org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values.REDUCE examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution and use are subject to each project's license. Do not reproduce without permission.