

Java TaskInProgress Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.TaskTracker.TaskInProgress. If you are wondering what the TaskInProgress class is for, how to use it, or what working examples look like, the selected code examples below should help.


TaskInProgress is a nested class of org.apache.hadoop.mapred.TaskTracker. The 14 code examples below show how it is used; by default they are ordered by popularity.
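One orientation note before the examples: TaskInProgress is a non-static inner class of TaskTracker, so it is always created through an enclosing TaskTracker instance (the tracker.new TaskInProgress(task, conf) syntax used in examples 4, 5, and 13). The following minimal sketch illustrates only that pattern; it assumes it is placed in the org.apache.hadoop.mapred package, since TaskInProgress and its constructor are package-visible in the MR1 code shown below, and the tracker, task, and conf arguments are placeholders supplied by the caller.

package org.apache.hadoop.mapred;

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;

// Sketch only: placed in org.apache.hadoop.mapred because TaskInProgress and
// its constructor are package-visible in the MR1 TaskTracker shown below.
public class TipUsageSketch {

  /**
   * Wraps a Task in a TaskInProgress bound to the given tracker.
   * TaskInProgress is a non-static inner class of TaskTracker, hence the
   * "tracker.new" syntax (see examples 4, 5, and 13 below).
   */
  static TaskInProgress register(TaskTracker tracker, Task task, JobConf conf) {
    return tracker.new TaskInProgress(task, conf);
  }
}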

Example 1: validateTipToJvm

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
synchronized public boolean validateTipToJvm(TaskInProgress tip, JVMId jvmId) {
  if (jvmId == null) {
    LOG.warn("Null jvmId. Cannot verify Jvm. validateTipToJvm returning false");
    return false;
  }
  TaskRunner taskRunner = jvmToRunningTask.get(jvmId);
  if (taskRunner == null) {
    return false; //JvmId not known.
  }
  TaskInProgress knownTip = taskRunner.getTaskInProgress();
  // Compare by reference; an equals() comparison could arguably be used instead.
  return knownTip == tip;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 17, Source: JvmManager.java

Example 2: getTaskForJvm

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
synchronized public TaskInProgress getTaskForJvm(JVMId jvmId)
    throws IOException {
  if (jvmToRunningTask.containsKey(jvmId)) {
    //In case of JVM reuse, tasks are returned to a previously launched
    //JVM via this method. However, when a new task is launched,
    //the task being returned has to be initialized.
    TaskRunner taskRunner = jvmToRunningTask.get(jvmId);
    JvmRunner jvmRunner = jvmIdToRunner.get(jvmId);
    Task task = taskRunner.getTaskInProgress().getTask();

    jvmRunner.taskGiven(task);
    return taskRunner.getTaskInProgress();

  }
  return null;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 17, Source: JvmManager.java

Example 3: initializeTracker

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
private void initializeTracker() throws IOException {
  tracker.setIndexCache(new IndexCache(trackerFConf));
  tracker.setTaskMemoryManagerEnabledFlag();

  // for test case system FS is the local FS
  tracker.systemFS = FileSystem.getLocal(trackerFConf);
  tracker.systemDirectory = new Path(TEST_ROOT_DIR.getAbsolutePath());
  tracker.setLocalFileSystem(tracker.systemFS);
  
  tracker.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
  tracker.runningJobs = new TreeMap<JobID, RunningJob>();
  trackerFConf.deleteLocalFiles(TaskTracker.SUBDIR);

  // setup task controller
  taskController = getTaskController();
  taskController.setConf(trackerFConf);
  taskController.setup(lDirAlloc, new LocalStorage(trackerFConf.getLocalDirs()));
  tracker.setTaskController(taskController);
  tracker.setLocalizer(new Localizer(tracker.getLocalFileSystem(),localDirs));
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 21, Source: TestTaskTrackerLocalization.java

Example 4: testCleanupTaskLocalization

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
/**
 * Localizes a cleanup task and validates permissions.
 * 
 * @throws InterruptedException 
 * @throws IOException 
 */
public void testCleanupTaskLocalization() throws IOException,
    InterruptedException {
  if (!canRun()) {
    return;
  }

  task.setTaskCleanupTask();
  // register task
  tip = tracker.new TaskInProgress(task, trackerFConf);

  // localize the job again.
  RunningJob rjob = tracker.localizeJob(tip);
  localizedJobConf = rjob.getJobConf();
  checkJobLocalization();

  // localize task cleanup attempt
  initializeTask();
  checkTaskLocalization();

}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 27, Source: TestTaskTrackerLocalization.java

Example 5: testWithClasspathPrecedence

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
@Test
public void testWithClasspathPrecedence() throws Throwable {
  ttConf.set(JobContext.MAPREDUCE_TASK_CLASSPATH_PRECEDENCE, "true");
  JobConf taskConf = new JobConf(ttConf);

  TaskTracker.RunningJob rjob = new TaskTracker.RunningJob(new JobID("jt", 1));
  TaskAttemptID attemptID = new TaskAttemptID("test", 0, true, 0, 0);
  Task task = new MapTask(null, attemptID, 0, null, MAP_SLOTS);
  task.setConf(taskConf);
  TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
  MyTaskRunner taskRunner = new MyTaskRunner(tip, tt, taskConf, rjob);
  final File workDir = new File(TEST_DIR, "work");
  workDir.mkdir();
  
  List<String> classPaths = TaskRunner.getClassPaths(taskConf, workDir, null);
  Vector<String> vargs = taskRunner.getVMArgs(task.getTaskID(), workDir, classPaths, 100);

  String classpath = vargs.get(2);
  String[] cp = classpath.split(":");
  assertTrue(cp[0], cp[0].contains("testjob"));
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 22, Source: TestTaskClasspathPrecedence.java

Example 6: testWithoutClasspathPrecedence

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
@Test
public void testWithoutClasspathPrecedence() throws Throwable {
  ttConf.set(JobContext.MAPREDUCE_TASK_CLASSPATH_PRECEDENCE, "false");
  JobConf taskConf = new JobConf(ttConf);

  TaskTracker.RunningJob rjob = new TaskTracker.RunningJob(new JobID("jt", 1));
  TaskAttemptID attemptID = new TaskAttemptID("test", 0, true, 0, 0);
  Task task = new MapTask(null, attemptID, 0, null, MAP_SLOTS);
  task.setConf(taskConf);
  TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
  MyTaskRunner taskRunner = new MyTaskRunner(tip, tt, taskConf, rjob);
  final File workDir = new File(TEST_DIR, "work");
  workDir.mkdir();
  
  List<String> classPaths = TaskRunner.getClassPaths(taskConf, workDir, null);
  Vector<String> vargs = taskRunner.getVMArgs(task.getTaskID(), workDir, classPaths, 100);

  String classpath = vargs.get(2);
  String[] cp = classpath.split(":");
  assertFalse(cp[0], cp[0].contains("testjob"));
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 22, Source: TestTaskClasspathPrecedence.java

Example 7: getTaskForJvm

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
synchronized public TaskInProgress getTaskForJvm(JVMId jvmId) {
  if (jvmToRunningTask.containsKey(jvmId)) {
    //In case of JVM reuse, tasks are returned to a previously launched
    //JVM via this method. However, when a new task is launched,
    //the task being returned has to be initialized.
    TaskRunner taskRunner = jvmToRunningTask.get(jvmId);
    JvmRunner jvmRunner = jvmIdToRunner.get(jvmId);
    Task task = taskRunner.getTaskInProgress().getTask();
    TaskControllerContext context = 
      new TaskController.TaskControllerContext();
    context.env = jvmRunner.env;
    context.task = task;
    //If we are returning the same task with which the JVM was launched,
    //we don't initialize the task again.
    if(!jvmRunner.env.conf.get("mapred.task.id").
        equals(task.getTaskID().toString())) {
      tracker.getTaskController().initializeTask(context);
    }

    jvmRunner.taskGiven(task);
    return taskRunner.getTaskInProgress();

  }
  return null;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 26, Source: JvmManager.java

Example 8: launchTask

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
/**
 * The primary public method that should be called to 'run' a task. Handles
 * both map and reduce tasks and marks them as completed after the configured
 * time interval
 * @param tip
 */
public void launchTask(TaskInProgress tip) throws IOException {
  LOG.info("Launching simulated task " + tip.getTask().getTaskID() +
      " for job " + tip.getTask().getJobID());
  TaskUmbilicalProtocol umbilicalProtocol = taskTracker.getUmbilical(tip);
  // For map tasks, we can just finish the task after some time. Same thing
  // with cleanup tasks, as we don't need to be waiting for mappers to finish
  if (tip.getTask().isMapTask() || tip.getTask().isTaskCleanupTask() ||
    tip.getTask().isJobCleanupTask() || tip.getTask().isJobSetupTask() ) {
    addTipToFinish(tip, umbilicalProtocol);
  } else {
    MapperWaitThread mwt =
        new MapperWaitThread(tip, this, umbilicalProtocol);
    // Save a reference to the mapper wait thread so that we can stop them if
    // the task gets killed
    mapperWaitThreadMap.put(tip, mwt);
    mwt.start();
  }

}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 26, Source: SimulatedTaskRunner.java

Example 9: addTipToFinish

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
/**
 * Add the specified TaskInProgress to the priority queue of tasks to finish.
 * @param tip
 * @param umbilicalProtocol
 */
protected void addTipToFinish(TaskInProgress tip,
                              TaskUmbilicalProtocol umbilicalProtocol) {
  long currentTime = System.currentTimeMillis();
  long finishTime = currentTime + Math.abs(rand.nextLong()) %
      timeToFinishTask;
  LOG.info("Adding TIP " + tip.getTask().getTaskID() +
      " to finishing queue with start time " +
      currentTime + " and finish time " + finishTime +
      " (" + ((finishTime - currentTime) / 1000.0) + " sec) to thread " +
      getName());
  TipToFinish ttf = new TipToFinish(tip, finishTime, umbilicalProtocol);
  tipQueue.put(ttf);
  // Interrupt the waiting thread. We could put in additional logic to only
  // interrupt when necessary, but probably not worth the complexity.
  this.interrupt();
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 22, Source: SimulatedTaskRunner.java

Example 10: cancel

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
/**
 * Called in case the task needs to be killed. Canceling will kill any map
 * wait threads and also remove it from the queue of tasks that should be
 * marked as finished.
 * @param tip the killed TaskInProgress
 */
public void cancel(TaskInProgress tip) {
  LOG.info("Canceling task "  + tip.getTask().getTaskID() + " of job " +
      tip.getTask().getJobID());
  // Cancel & remove the map completion finish thread for reduce tasks.
  if (!tip.getTask().isMapTask() && !tip.getTask().isTaskCleanupTask()) {
    if (!mapperWaitThreadMap.containsKey(tip)) {
      throw new RuntimeException("Mapper wait thread doesn't exist " +
          "for " + tip.getTask().getTaskID());
    }
    LOG.debug("Interrupting mapper wait thread for " +
        tip.getTask().getTaskID() + " job " +
        tip.getTask().getJobID());
    mapperWaitThreadMap.get(tip).interrupt();
    LOG.debug("Removing mapper wait thread for " +
        tip.getTask().getTaskID() + " job " + tip.getTask().getJobID());
    mapperWaitThreadMap.remove(tip);
  } else {
    LOG.debug(tip.getTask().getTaskID() + " is not a reduce task, so " +
        "not canceling mapper wait thread");
  }
  removeFromFinishingQueue(tip);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 29, Source: SimulatedTaskRunner.java

Example 11: localizeJob

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
@Override
RunningJob localizeJob(TaskInProgress tip)
    throws IOException, InterruptedException {
  if (firstJobStarted == false) {
    firstJobStarted = true;
    while (quitWaiting == false) {
      Thread.sleep(100);
    }
    firstJobFinished = true;
  }
  // mock out a RunningJob
  RunningJob rjob = mock(RunningJob.class);
  when(rjob.getLocalizedJobConf()).thenReturn(new Path("testing"));
  when(rjob.getJobConf()).thenReturn(new JobConf());
  jobLocalizedCount++;

  return rjob;
}
 
Developer ID: Seagate, Project: hadoop-on-lustre, Lines of code: 19, Source: TestTaskLauncherThreaded.java

Example 12: initializeTracker

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
private void initializeTracker() throws IOException {
  tracker.setIndexCache(new IndexCache(trackerFConf));
  tracker.setTaskMemoryManagerEnabledFlag();

  // for test case system FS is the local FS
  tracker.systemFS = FileSystem.getLocal(trackerFConf);
  tracker.setLocalFileSystem(tracker.systemFS);
  tracker.systemDirectory = new Path(TEST_ROOT_DIR.getAbsolutePath());

  tracker.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
  tracker.runningJobs = new TreeMap<JobID, RunningJob>();
  tracker.setAsyncDiskService(new MRAsyncDiskService(trackerFConf));
  tracker.getAsyncDiskService().cleanupAllVolumes();

  // Set up TaskTracker instrumentation
  tracker.setTaskTrackerInstrumentation(
      TaskTracker.createInstrumentation(tracker, trackerFConf));

  // setup task controller
  taskController = createTaskController();
  taskController.setConf(trackerFConf);
  taskController.setup();
  tracker.setTaskController(taskController);
  tracker.setLocalizer(new Localizer(tracker.getLocalFileSystem(), localDirs,
      taskController));
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 27, Source: TestTaskTrackerLocalization.java

Example 13: testCleanupTaskLocalization

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
/**
 * Localizes a cleanup task and validates permissions.
 * 
 * @throws InterruptedException 
 * @throws IOException 
 */
public void testCleanupTaskLocalization() throws IOException,
    InterruptedException {
  if (!canRun()) {
    return;
  }

  task.setTaskCleanupTask();
  // register task
  tip = tracker.new TaskInProgress(task, trackerFConf);

  // localize the job.
  RunningJob rjob = tracker.localizeJob(tip);
  localizedJobConf = rjob.getJobConf();
  checkJobLocalization();

  // localize task cleanup attempt
  initializeTask();
  checkTaskLocalization();

}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 27, Source: TestTaskTrackerLocalization.java

Example 14: isKillable

import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; // import the required package/class
/**
 * Check if a task can be killed to increase free memory
 * @param tid task attempt ID
 * @return true if the task can be killed
 */
private boolean isKillable(TaskAttemptID tid) {
    TaskInProgress tip = taskTracker.getRunningTask(tid);
    return tip != null && !tip.wasKilled() &&
           (tip.getRunState() == TaskStatus.State.RUNNING ||
            tip.getRunState() == TaskStatus.State.COMMIT_PENDING);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 12, Source: TaskMemoryManagerThread.java


Note: The org.apache.hadoop.mapred.TaskTracker.TaskInProgress class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using it, and do not reproduce this article without permission.