

Java MapTaskAttemptInfo Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.tools.rumen.MapTaskAttemptInfo. If you have been wondering what exactly MapTaskAttemptInfo does, how to use it, or what real usage looks like, the curated class code examples below may help.


The MapTaskAttemptInfo class belongs to the org.apache.hadoop.tools.rumen package. Five code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
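
Before the examples, here is a minimal, self-contained sketch of constructing a MapTaskAttemptInfo and reading it back. It follows the usage visible in the examples below: a TaskStatus.State, a Rumen TaskInfo describing the attempt's I/O, and a runtime value. The class name MapTaskAttemptInfoSketch and the I/O figures are made up for illustration; the State import path and the milliseconds unit for the runtime are assumptions, and the 3-argument constructor is deprecated in some Hadoop versions (hence the @SuppressWarnings("deprecation") in Example 1), so check the Rumen javadoc for your release.

import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;

public class MapTaskAttemptInfoSketch {
  public static void main(String[] args) {
    // Describe the attempt's I/O: bytes in, records in, bytes out, records out,
    // and a memory estimate (-1 = unknown, as in the examples below).
    TaskInfo taskInfo = new TaskInfo(1 << 20, 1000, 512 << 10, 500, -1);

    // A successful map attempt with a runtime of 100 (assumed to be milliseconds).
    MapTaskAttemptInfo attempt =
        new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, 100L);

    System.out.println("state   = " + attempt.getState());
    System.out.println("runtime = " + attempt.getRuntime());
  }
}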

Example 1: getTaskAttemptInfo

import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: DebugJobProducer.java

Example 2: getTaskAttemptInfo

import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 23, Source: DebugJobProducer.java

Example 3: getTaskAttemptInfo

import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1), 100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1), 100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 19, Source: DebugJobProducer.java

Example 4: runMapTask

import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
                       long mapStart, long mapRuntime, long killHeartbeat) {
  long mapDone = mapStart + mapRuntime;
  long mapEndHeartbeat = nextHeartbeat(mapDone);
  final boolean isKilled = (killHeartbeat>=0);
  if (isKilled) {
    mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }

  LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone + 
            ", mapEndHeartbeat=" + mapEndHeartbeat + 
            ", killHeartbeat=" + killHeartbeat);
  
  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi = 
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);        
  Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, "dummysplitclass",
                          null, numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0); 
  MapTaskAttemptInfo taskAttemptInfo = 
      new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
  TaskTrackerAction action = 
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
       action);
  }

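  // On each heartbeat until the map's end heartbeat, report RUNNING; on the
  // final heartbeat report SUCCEEDED, or KILLED if the attempt was killed.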
  for(long simulationTime = mapStart + heartbeatInterval; 
      simulationTime <= mapEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < mapEndHeartbeat ? 
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == mapEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    MapTaskStatus mapStatus = new MapTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
       mapStatus);
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 46, Source: MockSimulatorJobTracker.java

Example 5: runMapTask

import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
                       long mapStart, long mapRuntime, long killHeartbeat) {
  long mapDone = mapStart + mapRuntime;
  long mapEndHeartbeat = nextHeartbeat(mapDone);
  final boolean isKilled = (killHeartbeat>=0);
  if (isKilled) {
    mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }

  LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone + 
            ", mapEndHeartbeat=" + mapEndHeartbeat + 
            ", killHeartbeat=" + killHeartbeat);
  
  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi = 
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);        
  Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, new TaskSplitIndex(),
                           numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0); 
  MapTaskAttemptInfo taskAttemptInfo = 
      new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
  TaskTrackerAction action = 
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
       action);
  }

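  // On each heartbeat until the map's end heartbeat, report RUNNING; on the
  // final heartbeat report SUCCEEDED, or KILLED if the attempt was killed.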
  for(long simulationTime = mapStart + heartbeatInterval; 
      simulationTime <= mapEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < mapEndHeartbeat ? 
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == mapEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    MapTaskStatus mapStatus = new MapTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
       mapStatus);
  }
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 46, Source: MockSimulatorJobTracker.java


Note: The org.apache.hadoop.tools.rumen.MapTaskAttemptInfo class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.