

Java JobSplit.EMPTY_TASK_SPLIT Field Code Examples

This article collects typical usage examples of the Java field org.apache.hadoop.mapreduce.split.JobSplit.EMPTY_TASK_SPLIT. If you are wondering what JobSplit.EMPTY_TASK_SPLIT is for and how it is used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.split.JobSplit.


The following 13 code examples of JobSplit.EMPTY_TASK_SPLIT are shown below, sorted by popularity by default.
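
Before the examples, here is a minimal, self-contained sketch of what the field itself provides. It is a sketch, not code from any of the projects below: it assumes only that the MRv1-era Hadoop classes used throughout this page are on the classpath, and the class name EmptySplitDemo is invented for illustration. EMPTY_TASK_SPLIT is a pre-built, empty TaskSplitMetaInfo used as a placeholder wherever a task needs split metadata but reads no real input (job setup/cleanup tasks, fake tasks in scheduler tests):

import org.apache.hadoop.mapreduce.split.JobSplit;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;

public class EmptySplitDemo {
  public static void main(String[] args) {
    // The shared placeholder: no preferred hosts, no input bytes.
    TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
    System.out.println(split.getLocations().length);  // expected: 0
    System.out.println(split.getInputDataLength());   // expected: 0

    // The split index is what the MapTask constructors in the
    // examples below actually consume via split.getSplitIndex().
    TaskSplitIndex index = split.getSplitIndex();
    System.out.println(index.getSplitLocation());     // expected: "" (no split file)
    System.out.println(index.getStartOffset());       // expected: 0
  }
}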

Example 1: FakeTaskInProgress

FakeTaskInProgress(JobID jId, JobConf jobConf, Task t, 
    boolean isMap, FakeJobInProgress job) {
  super(jId, "", JobSplit.EMPTY_TASK_SPLIT, job.jobtracker, jobConf, job, 
        0, 1);
  this.isMap = isMap;
  this.fakeJob = job;
  activeTasks = new TreeMap<TaskAttemptID, String>();
  activeTasks.put(t.getTaskID(), "tt");
  // create a fake status for a task that is running for a bit
  this.taskStatus = TaskStatus.createTaskStatus(isMap);
  taskStatus.setProgress(0.5f);
  taskStatus.setRunState(TaskStatus.State.RUNNING);
  if (jobConf.getMapSpeculativeExecution()) {
    //resetting of the hasSpeculativeMap is done
    //when speculative map is scheduled by the job.
    hasSpeculativeMap = true;
  } 
  if (jobConf.getReduceSpeculativeExecution()) {
    //resetting of the hasSpeculativeReduce is done
    //when speculative reduce is scheduled by the job.
    hasSpeculativeReduce = true;
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 23, Source: TestCapacityScheduler.java

Example 2: obtainNewMapTask

@Override
public Task obtainNewMapTask(final TaskTrackerStatus tts, int clusterSize,
    int numUniqueHosts, int localityLevel) throws IOException {
  for (int map = 0; map < maps.length; map++) {
    FakeTaskInProgress tip = (FakeTaskInProgress) maps[map];
    if (!tip.isRunning() && !tip.isComplete() &&
        getLocalityLevel(tip, tts) < localityLevel) {
      TaskAttemptID attemptId = getTaskAttemptID(tip);
      JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
      Task task = new MapTask("", attemptId, 0, split.getSplitIndex(), 1) {
        @Override
        public String toString() {
          return String.format("%s on %s", getTaskID(), tts.getTrackerName());
        }
      };
      runningMapTasks++;
      tip.createTaskAttempt(task, tts.getTrackerName());
      nonLocalRunningMaps.add(tip);
      taskTrackerManager.startTask(tts.getTrackerName(), task, tip);
      return task;
    }
  }
  return null;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 24, Source: TestFairScheduler.java

Example 3: obtainNewMapTask

public Task obtainNewMapTask(final TaskTrackerStatus tts, int clusterSize,
    int numUniqueHosts, int localityLevel) throws IOException {
  for (int map = 0; map < maps.length; map++) {
    FakeTaskInProgress tip = (FakeTaskInProgress) maps[map];
    if (!tip.isRunning() && !tip.isComplete() &&
        getLocalityLevel(tip, tts) < localityLevel) {
      TaskAttemptID attemptId = getTaskAttemptID(tip);
      JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
      Task task = new MapTask("", attemptId, 0, split.getSplitIndex(), 1) {
        @Override
        public String toString() {
          return String.format("%s on %s", getTaskID(), tts.getTrackerName());
        }
      };
      runningMapTasks++;
      tip.createTaskAttempt(task, tts.getTrackerName());
      nonLocalRunningMaps.add(tip);
      taskTrackerManager.startTask(tts.getTrackerName(), task, tip);
      return task;
    }
  }
  return null;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 23, Source: TestFairScheduler.java

Example 4: obtainNewMapTask

public Task obtainNewMapTask(final TaskTrackerStatus tts, int clusterSize,
    int numUniqueHosts, int localityLevel) throws IOException {
  for (int map = 0; map < maps.length; map++) {
    HFSPFakeTaskInProgress tip = (HFSPFakeTaskInProgress) maps[map];
    if (!tip.isRunning() && !tip.isComplete()
        && getLocalityLevel(tip, tts) < localityLevel) {
      TaskAttemptID attemptId = getTaskAttemptID(tip);
      JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
      Task task = new MapTask("", attemptId, 0, split.getSplitIndex(), 1) {
        @Override
        public String toString() {
          return String.format("%s on %s", getTaskID(), tts.getTrackerName());
        }
      };
      runningMapTasks++;
      tip.createTaskAttempt(task, tts.getTrackerName());
      nonLocalRunningMaps.add(tip);
      taskTrackerManager.startTask(tts.getTrackerName(), task, tip);
      return task;
    }
  }
  return null;
}
 
Developer: melrief, Project: HFSP, Lines: 23, Source: HFSPFakeJobInProgress.java

Example 5: createAndAddTIP

private TaskInProgress createAndAddTIP(JobTracker jobtracker, 
                                       JobInProgress jip, TaskType type) {
  JobConf conf = jip.getJobConf();
  JobID id = jip.getJobID();
  // now create a fake tip for this fake job
  TaskInProgress tip = null;
  if (type == TaskType.MAP) {
    tip = new TaskInProgress(id, "dummy", JobSplit.EMPTY_TASK_SPLIT, 
                             jobtracker, conf, jip, 0, 1);
    jip.maps = new TaskInProgress[] {tip};
  } else if (type == TaskType.REDUCE) {
    tip = new TaskInProgress(id, "dummy", jip.desiredMaps(), 0, 
                             jobtracker, conf, jip, 1);
    jip.reduces = new TaskInProgress[] {tip};
  } else if (type == TaskType.JOB_SETUP) {
    tip = 
      new TaskInProgress(id, "dummy", JobSplit.EMPTY_TASK_SPLIT, 
                         jobtracker, conf, jip, 0, 1);
    jip.setup = new TaskInProgress[] {tip};
  } else if (type == TaskType.JOB_CLEANUP) {
    tip = 
      new TaskInProgress(id, "dummy", JobSplit.EMPTY_TASK_SPLIT, 
                         jobtracker, conf, jip, 0, 1);
    jip.cleanup = new TaskInProgress[] {tip};
  }
  return tip;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source: TestJobRetire.java

Example 6: initSetupCleanupTasks

synchronized void initSetupCleanupTasks(String jobFile) {
  if (!jobSetupCleanupNeeded) {
    LOG.info("Setup/Cleanup not needed for job " + jobId);
    // nothing to initialize
    return;
  }
  // create two cleanup tips, one map and one reduce.
  cleanup = new TaskInProgress[2];

  // cleanup map tip. This map doesn't use any splits. Just assign an empty
  // split.
  TaskSplitMetaInfo emptySplit = JobSplit.EMPTY_TASK_SPLIT;
  cleanup[0] = new TaskInProgress(jobId, jobFile, emptySplit, 
          jobtracker, conf, this, numMapTasks, 1);
  cleanup[0].setJobCleanupTask();

  // cleanup reduce tip.
  cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks, jobtracker, conf, this, 1);
  cleanup[1].setJobCleanupTask();

  // create two setup tips, one map and one reduce.
  setup = new TaskInProgress[2];

  // setup map tip. This map doesn't use any split. Just assign an empty
  // split.
  setup[0] = new TaskInProgress(jobId, jobFile, emptySplit, 
          jobtracker, conf, this, numMapTasks + 1, 1);
  setup[0].setJobSetupTask();

  // setup reduce tip.
  setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks + 1, jobtracker, conf, this, 1);
  setup[1].setJobSetupTask();
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 35, Source: JobInProgress.java

Example 7: initTasks

/**
 * Initialize tasks(1 map and 1 reduce task each needs 2 slots, similar to
 * tasks of a high RAM job). 
 */
@Override
public synchronized void initTasks() throws IOException {
  super.initTasks();

  final int numSlotsPerTask = 2;
  maps = new TaskInProgress[1];
  reduces = new TaskInProgress[1];
  
  maps[0] = new FakeTaskInProgress(getJobID(), "test",  
      JobSplit.EMPTY_TASK_SPLIT,
      jobtracker, getJobConf(), this, 0, numSlotsPerTask);
  TaskAttemptID attemptId = new TaskAttemptID(maps[0].getTIPId(), 0);
  
  // make this task a taskCleanup task of a map task
  mapCleanupTasks.add(attemptId);
  TaskStatus stat = new MapTaskStatus(attemptId, 0.01f, 2,
      TaskStatus.State.FAILED_UNCLEAN, "", "", trackers[0],
      TaskStatus.Phase.MAP, new Counters());
  maps[0].updateStatus(stat);
  
  //similarly for reduce task's taskCleanup task
  reduces[0] = new FakeTaskInProgress(getJobID(), "test", 1,
      0, jobtracker, getJobConf(), this, numSlotsPerTask);
  attemptId = new TaskAttemptID(reduces[0].getTIPId(), 0);
  
  // make this task a taskCleanup task of a reduce task
  reduceCleanupTasks.add(attemptId);
  stat = new ReduceTaskStatus(attemptId, 0.01f, 2,
      TaskStatus.State.FAILED_UNCLEAN, "", "", trackers[0],
      TaskStatus.Phase.REDUCE, new Counters());
  reduces[0].updateStatus(stat);
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 36, Source: TestSetupTaskScheduling.java

Example 8: createSplits

@Override
TaskSplitMetaInfo [] createSplits(org.apache.hadoop.mapreduce.JobID jobId){
  TaskSplitMetaInfo[] splits = 
    new TaskSplitMetaInfo[numMapTasks];
  for (int i = 0; i < numMapTasks; i++) {
    splits[i] = JobSplit.EMPTY_TASK_SPLIT;
  }
  return splits;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 9, Source: FakeObjectUtilities.java
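
A design note on createSplits above: because EMPTY_TASK_SPLIT is a single shared TaskSplitMetaInfo instance, filling every slot of the array with it avoids a per-task allocation. This is safe as long as nothing downstream mutates the split metadata, and the fake tasks here only read it: no preferred hosts, zero input length, which is exactly what split-less test tasks want.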

Example 9: initTasks

@Override
public synchronized void initTasks() throws IOException {
  // initTasks is needed to create non-empty cleanup and setup TIP
  // arrays, otherwise calls such as job.getTaskInProgress will fail
  JobID jobId = getJobID();
  JobConf conf = getJobConf();
  String jobFile = "";
  // create two cleanup tips, one map and one reduce.
  cleanup = new TaskInProgress[2];
  // cleanup map tip.
  cleanup[0] = new TaskInProgress(jobId, jobFile, null,
          jobtracker, conf, this, numMapTasks, 1);
  cleanup[0].setJobCleanupTask();
  // cleanup reduce tip.
  cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks, jobtracker, conf, this, 1);
  cleanup[1].setJobCleanupTask();
  // create two setup tips, one map and one reduce.
  setup = new TaskInProgress[2];
  // setup map tip.
  setup[0] = new TaskInProgress(jobId, jobFile, null,
          jobtracker, conf, this, numMapTasks + 1, 1);
  setup[0].setJobSetupTask();
  // setup reduce tip.
  setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks + 1, jobtracker, conf, this, 1);
  setup[1].setJobSetupTask();
  // create maps
  numMapTasks = conf.getNumMapTasks();
  maps = new TaskInProgress[numMapTasks];
  JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
  for (int i = 0; i < numMapTasks; i++) {
    String[] inputLocations = null;
    if (mapInputLocations != null)
      inputLocations = mapInputLocations[i];
    maps[i] = new FakeTaskInProgress(getJobID(), i,
        getJobConf(), this, inputLocations, split);
    if (mapInputLocations == null) // Job has no locality info
      nonLocalMaps.add(maps[i]);
  }
  // create reduces
  numReduceTasks = conf.getNumReduceTasks();
  reduces = new TaskInProgress[numReduceTasks];
  for (int i = 0; i < numReduceTasks; i++) {
    reduces[i] = new FakeTaskInProgress(getJobID(), i,
        getJobConf(), this);
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 48, Source: TestFairScheduler.java

Example 10: initTasks

@Override
public synchronized void initTasks() throws IOException {
  // initTasks is needed to create non-empty cleanup and setup TIP
  // arrays, otherwise calls such as job.getTaskInProgress will fail
  JobID jobId = getJobID();
  JobConf conf = getJobConf();
  String jobFile = "";
  // create two cleanup tips, one map and one reduce.
  cleanup = new TaskInProgress[2];
  // cleanup map tip.
  cleanup[0] = new TaskInProgress(jobId, jobFile, null, 
          jobtracker, conf, this, numMapTasks, 1);
  cleanup[0].setJobCleanupTask();
  // cleanup reduce tip.
  cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks, jobtracker, conf, this, 1);
  cleanup[1].setJobCleanupTask();
  // create two setup tips, one map and one reduce.
  setup = new TaskInProgress[2];
  // setup map tip.
  setup[0] = new TaskInProgress(jobId, jobFile, null, 
          jobtracker, conf, this, numMapTasks + 1, 1);
  setup[0].setJobSetupTask();
  // setup reduce tip.
  setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks + 1, jobtracker, conf, this, 1);
  setup[1].setJobSetupTask();
  // create maps
  numMapTasks = conf.getNumMapTasks();
  maps = new TaskInProgress[numMapTasks];
  // all map tasks share the same empty split metadata
  JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
  for (int i = 0; i < numMapTasks; i++) {
    String[] inputLocations = null;
    if (mapInputLocations != null)
      inputLocations = mapInputLocations[i];
    maps[i] = new FakeTaskInProgress(getJobID(), i,
        getJobConf(), this, inputLocations, split, jobtracker);
    if (mapInputLocations == null) // Job has no locality info
      nonLocalMaps.add(maps[i]);
  }
  // create reduces
  numReduceTasks = conf.getNumReduceTasks();
  reduces = new TaskInProgress[numReduceTasks];
  for (int i = 0; i < numReduceTasks; i++) {
    reduces[i] = new FakeTaskInProgress(getJobID(), i,
        getJobConf(), this, jobtracker);
  }
  
  initialized = true;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 51, Source: TestFairScheduler.java

Example 11: initTasks

@Override
public synchronized void initTasks() throws IOException {
  // initTasks is needed to create non-empty cleanup and setup TIP
  // arrays, otherwise calls such as job.getTaskInProgress will fail
  JobID jobId = getJobID();
  JobConf conf = getJobConf();
  String jobFile = "";
  // create two cleanup tips, one map and one reduce.
  cleanup = new TaskInProgress[2];
  // cleanup map tip.
  cleanup[0] = new TaskInProgress(jobId, jobFile, null, jobtracker, conf,
      this, numMapTasks, 1);
  cleanup[0].setJobCleanupTask();
  // cleanup reduce tip.
  cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
      numReduceTasks, jobtracker, conf, this, 1);
  cleanup[1].setJobCleanupTask();
  // create two setup tips, one map and one reduce.
  setup = new TaskInProgress[2];
  // setup map tip.
  setup[0] = new TaskInProgress(jobId, jobFile, null, jobtracker, conf, this,
      numMapTasks + 1, 1);
  setup[0].setJobSetupTask();
  // setup reduce tip.
  setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
      numReduceTasks + 1, jobtracker, conf, this, 1);
  setup[1].setJobSetupTask();
  // create maps
  numMapTasks = conf.getNumMapTasks();
  maps = new TaskInProgress[numMapTasks];
  // all map tasks share the same empty split metadata
  JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
  for (int i = 0; i < numMapTasks; i++) {
    String[] inputLocations = null;
    if (mapInputLocations != null)
      inputLocations = mapInputLocations[i];
    maps[i] = new HFSPFakeTaskInProgress(this.getJobID(), this.jobTracker,
        true, i, this.getJobConf(), this, inputLocations, split, this.clock);
    if (mapInputLocations == null) // Job has no locality info
      nonLocalMaps.add(maps[i]);
  }
  // create reduces
  numReduceTasks = conf.getNumReduceTasks();
  reduces = new TaskInProgress[numReduceTasks];
  for (int i = 0; i < numReduceTasks; i++) {
    reduces[i] = new HFSPFakeTaskInProgress(getJobID(), this.jobTracker,
        false, i, getJobConf(), this, null, split, clock);
  }
  this.tasksInited = true;
}
 
Developer: melrief, Project: HFSP, Lines: 50, Source: HFSPFakeJobInProgress.java

Example 12: obtainNewMapTask

@Override
public synchronized Task obtainNewMapTask(
  final TaskTrackerStatus tts, int clusterSize,
  int ignored) throws IOException {
  boolean areAllMapsRunning = (mapTaskCtr == numMapTasks);
  if (areAllMapsRunning) {
    if (!getJobConf().getMapSpeculativeExecution() ||
      speculativeMapTasks > 0) {
      return null;
    }
  }
  TaskAttemptID attemptId = getTaskAttemptID(true, areAllMapsRunning);
  JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
  Task task = new MapTask(
    "", attemptId, 0, split.getSplitIndex(), super.numSlotsPerMap) {
    @Override
    public String toString() {
      return String.format("%s on %s", getTaskID(), tts.getTrackerName());
    }
  };
  taskTrackerManager.startTask(tts.getTrackerName(), task);
  runningMapTasks++;
  // create a fake TIP and keep track of it
  FakeTaskInProgress mapTip = new FakeTaskInProgress(
    getJobID(),
    getJobConf(), task, true, this, split);
  mapTip.taskStatus.setRunState(TaskStatus.State.RUNNING);
  if (areAllMapsRunning) {
    speculativeMapTasks++;
    // a speculative map has been scheduled; mark every existing map tip
    // as no longer needing a speculative task.
    for (TaskInProgress t : mapTips) {
      if (t instanceof FakeTaskInProgress) {
        FakeTaskInProgress mt = (FakeTaskInProgress) t;
        mt.hasSpeculativeMap = false;
      }
    }
  } else {
    //add only non-speculative tips.
    mapTips.add(mapTip);
    //add the tips to the JobInProgress TIPS
    maps = mapTips.toArray(new TaskInProgress[mapTips.size()]);
  }
  return task;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 45, Source: CapacityTestUtils.java

Example 13: initTasks

@Override
public synchronized void initTasks() throws IOException {
  // initTasks is needed to create non-empty cleanup and setup TIP
  // arrays, otherwise calls such as job.getTaskInProgress will fail
  JobID jobId = getJobID();
  JobConf conf = getJobConf();
  String jobFile = "";
  // create two cleanup tips, one map and one reduce.
  cleanup = new TaskInProgress[2];
  // cleanup map tip.
  cleanup[0] = new TaskInProgress(jobId, jobFile, null, 
          jobtracker, conf, this, numMapTasks, 1);
  cleanup[0].setJobCleanupTask();
  // cleanup reduce tip.
  cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks, jobtracker, conf, this, 1);
  cleanup[1].setJobCleanupTask();
  // create two setup tips, one map and one reduce.
  setup = new TaskInProgress[2];
  // setup map tip.
  setup[0] = new TaskInProgress(jobId, jobFile, null, 
          jobtracker, conf, this, numMapTasks + 1, 1);
  setup[0].setJobSetupTask();
  // setup reduce tip.
  setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                     numReduceTasks + 1, jobtracker, conf, this, 1);
  setup[1].setJobSetupTask();
  // create maps
  numMapTasks = conf.getNumMapTasks();
  maps = new TaskInProgress[numMapTasks];
  // all map tasks share the same empty split metadata
  JobSplit.TaskSplitMetaInfo split = JobSplit.EMPTY_TASK_SPLIT;
  for (int i = 0; i < numMapTasks; i++) {
    String[] inputLocations = null;
    if (mapInputLocations != null)
      inputLocations = mapInputLocations[i];
    maps[i] = new FakeTaskInProgress(getJobID(), i,
        getJobConf(), this, inputLocations, split);
    if (mapInputLocations == null) // Job has no locality info
      nonLocalMaps.add(maps[i]);
  }
  // create reduces
  numReduceTasks = conf.getNumReduceTasks();
  reduces = new TaskInProgress[numReduceTasks];
  for (int i = 0; i < numReduceTasks; i++) {
    reduces[i] = new FakeTaskInProgress(getJobID(), i,
        getJobConf(), this);
  }
  
  initialized = true;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 51, Source: TestFairScheduler.java


Note: The org.apache.hadoop.mapreduce.split.JobSplit.EMPTY_TASK_SPLIT examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.