

Java TaskAttempt Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt. If you are wondering what the TaskAttempt class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.


The TaskAttempt class belongs to the org.apache.hadoop.mapreduce.v2.app.job package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
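Before the examples, a minimal orientation sketch, not taken from any of the projects below: inside the MapReduce application master, a TaskAttempt is normally reached from an AppContext by walking Job, then Task, then the attempt (Example 1 follows the same chain). The helper name lookupAttempt and the assumption that an AppContext is already at hand are placeholders for illustration.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;

// Hypothetical helper: resolve a TaskAttempt from the application context,
// returning null when the job, task, or attempt is unknown.
private TaskAttempt lookupAttempt(AppContext context, TaskAttemptId attemptId) {
  TaskId taskId = attemptId.getTaskId();
  JobId jobId = taskId.getJobId();
  Job job = context.getJob(jobId);
  if (job == null) {
    return null;
  }
  Task task = job.getTask(taskId);
  return task == null ? null : task.getAttempt(attemptId);
}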

Example 1: storedPerAttemptValue

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
private long storedPerAttemptValue
     (Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  Task task = job.getTask(taskID);

  if (task == null) {
    return -1L;
  }

  TaskAttempt taskAttempt = task.getAttempt(attemptID);

  if (taskAttempt == null) {
    return -1L;
  }

  AtomicLong estimate = data.get(taskAttempt);

  return estimate == null ? -1L : estimate.get();

}
 
Developer: naver, Project: hadoop, Lines: 24, Source: LegacyTaskRuntimeEstimator.java

Example 2: JobTaskAttemptCounterInfo

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
public JobTaskAttemptCounterInfo(TaskAttempt taskattempt) {

    this.id = MRApps.toString(taskattempt.getID());
    total = taskattempt.getCounters();
    taskAttemptCounterGroup = new ArrayList<TaskCounterGroupInfo>();
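    // Wrap each non-null counter group of the attempt in a TaskCounterGroupInfo and collect them.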
    if (total != null) {
      for (CounterGroup g : total) {
        if (g != null) {
          TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g);
          if (cginfo != null) {
            taskAttemptCounterGroup.add(cginfo);
          }
        }
      }
    }
  }
 
Developer: naver, Project: hadoop, Lines: 17, Source: JobTaskAttemptCounterInfo.java

Example 3: TaskAttemptInfo

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
public TaskAttemptInfo(TaskAttempt ta, TaskType type, Boolean isRunning) {
  final TaskAttemptReport report = ta.getReport();
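  // Copy the attempt's report into plain fields consumed by the MR AM web services (see Examples 5 and 6).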
  this.type = type.toString();
  this.id = MRApps.toString(ta.getID());
  this.nodeHttpAddress = ta.getNodeHttpAddress();
  this.startTime = report.getStartTime();
  this.finishTime = report.getFinishTime();
  this.assignedContainerId = ConverterUtils.toString(report.getContainerId());
  this.assignedContainer = report.getContainerId();
  this.progress = report.getProgress() * 100;
  this.status = report.getStateString();
  this.state = report.getTaskAttemptState();
  this.elapsedTime = Times
      .elapsed(this.startTime, this.finishTime, isRunning);
  if (this.elapsedTime == -1) {
    this.elapsedTime = 0;
  }
  this.diagnostics = report.getDiagnosticInfo();
  this.rack = ta.getNodeRackName();
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TaskAttemptInfo.java

Example 4: ReduceTaskAttemptInfo

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
public ReduceTaskAttemptInfo(TaskAttempt ta, TaskType type) {
  super(ta, type, false);

  this.shuffleFinishTime = ta.getShuffleFinishTime();
  this.mergeFinishTime = ta.getSortFinishTime();
  this.elapsedShuffleTime = Times.elapsed(this.startTime,
      this.shuffleFinishTime, false);
  if (this.elapsedShuffleTime == -1) {
    this.elapsedShuffleTime = 0;
  }
  this.elapsedMergeTime = Times.elapsed(this.shuffleFinishTime,
      this.mergeFinishTime, false);
  if (this.elapsedMergeTime == -1) {
    this.elapsedMergeTime = 0;
  }
  this.elapsedReduceTime = Times.elapsed(this.mergeFinishTime,
      this.finishTime, false);
  if (this.elapsedReduceTime == -1) {
    this.elapsedReduceTime = 0;
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: ReduceTaskAttemptInfo.java

Example 5: getJobTaskAttempts

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @PathParam("taskid") String tid) {

  init();
  TaskAttemptsInfo attempts = new TaskAttemptsInfo();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  Task task = getTaskFromTaskIdString(tid, job);

  for (TaskAttempt ta : task.getAttempts().values()) {
    if (ta != null) {
      if (task.getType() == TaskType.REDUCE) {
        attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
      } else {
        attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
      }
    }
  }
  return attempts;
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: AMWebServices.java

Example 6: getJobTaskAttemptId

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @PathParam("taskid") String tid,
    @PathParam("attemptid") String attId) {

  init();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  Task task = getTaskFromTaskIdString(tid, job);
  TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
  if (task.getType() == TaskType.REDUCE) {
    return new ReduceTaskAttemptInfo(ta, task.getType());
  } else {
    return new TaskAttemptInfo(ta, task.getType(), true);
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: AMWebServices.java
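Assuming the MapReduce application master's standard web service root of /ws/v1/mapreduce (reached through the resource manager proxy), the two handlers above answer GET requests of the form .../ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts (all attempts of a task) and .../attempts/{attemptid} (a single attempt), returning JSON or XML according to the request's Accept header.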

Example 7: getTaskAttempts

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
@Override
protected Collection<TaskAttempt> getTaskAttempts() {
  List<TaskAttempt> fewTaskAttemps = new ArrayList<TaskAttempt>();
  String taskTypeStr = $(TASK_TYPE);
  TaskType taskType = MRApps.taskType(taskTypeStr);
  String attemptStateStr = $(ATTEMPT_STATE);
  TaskAttemptStateUI neededState = MRApps
      .taskAttemptState(attemptStateStr);
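  // Walk every task of the requested type and keep only the attempts whose state matches the requested filter.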
  for (Task task : super.app.getJob().getTasks(taskType).values()) {
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    for (TaskAttempt attempt : attempts.values()) {
      if (neededState.correspondsTo(attempt.getState())) {
        fewTaskAttemps.add(attempt);
      }
    }
  }
  return fewTaskAttemps;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: AttemptsPage.java

Example 8: getAttempts

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
@Override
public Map<TaskAttemptId, TaskAttempt> getAttempts() {
  readLock.lock();

  try {
    if (attempts.size() <= 1) {
      return attempts;
    }
    
    Map<TaskAttemptId, TaskAttempt> result
        = new LinkedHashMap<TaskAttemptId, TaskAttempt>();
    result.putAll(attempts);

    return result;
  } finally {
    readLock.unlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TaskImpl.java

Example 9: getCounters

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
@Override
  public Counters getCounters() {
    Counters counters = null;
    readLock.lock();
    try {
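      // Serve the counters of the attempt chosen by selectBestAttempt(); fall back to empty counters when no attempt is available.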
      TaskAttempt bestAttempt = selectBestAttempt();
      if (bestAttempt != null) {
        counters = bestAttempt.getCounters();
      } else {
        counters = TaskAttemptImpl.EMPTY_COUNTERS;
//        counters.groups = new HashMap<CharSequence, CounterGroup>();
      }
      return counters;
    } finally {
      readLock.unlock();
    }
  }
 
Developer: naver, Project: hadoop, Lines: 18, Source: TaskImpl.java

Example 10: handleTaskAttemptCompletion

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() !=0)
      runTime = (int)(attempt.getFinishTime() - attempt.getLaunchTime());
    tce.setAttemptRunTime(runTime);
    
    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TaskImpl.java

Example 11: writeBadOutput

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
      TypeConverter.fromYarn(attempt.getID()));
 
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key2, val2);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val2);
    theRecordWriter.write(nullWritable, val1);
    theRecordWriter.write(key1, nullWritable);
    theRecordWriter.write(key2, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key1, val1);
  } finally {
    theRecordWriter.close(tContext);
  }
  
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestRecovery.java

Example 12: writeOutput

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
private void writeOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
      TypeConverter.fromYarn(attempt.getID()));
  
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(tContext);
  }
  
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestRecovery.java

Example 13: verifyHsTaskAttempts

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
public void verifyHsTaskAttempts(JSONObject json, Task task)
    throws JSONException {
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject attempts = json.getJSONObject("taskAttempts");
  assertEquals("incorrect number of elements", 1, json.length());
  JSONArray arr = attempts.getJSONArray("taskAttempt");
  for (TaskAttempt att : task.getAttempts().values()) {
    TaskAttemptId id = att.getID();
    String attid = MRApps.toString(id);
    Boolean found = false;

    for (int i = 0; i < arr.length(); i++) {
      JSONObject info = arr.getJSONObject(i);
      if (attid.matches(info.getString("id"))) {
        found = true;
        verifyHsTaskAttempt(info, att, task.getType());
      }
    }
    assertTrue("task attempt with id: " + attid
        + " not in web service output", found);
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestHsWebServicesAttempts.java

Example 14: verifyHsJobTaskAttemptCounters

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
public void verifyHsJobTaskAttemptCounters(JSONObject info, TaskAttempt att)
    throws JSONException {

  assertEquals("incorrect number of elements", 2, info.length());

  WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()),
      info.getString("id"));

  // just do simple verification of fields - not data is correct
  // in the fields
  JSONArray counterGroups = info.getJSONArray("taskAttemptCounterGroup");
  for (int i = 0; i < counterGroups.length(); i++) {
    JSONObject counterGroup = counterGroups.getJSONObject(i);
    String name = counterGroup.getString("counterGroupName");
    assertTrue("name not set", (name != null && !name.isEmpty()));
    JSONArray counters = counterGroup.getJSONArray("counter");
    for (int j = 0; j < counters.length(); j++) {
      JSONObject counter = counters.getJSONObject(j);
      String counterName = counter.getString("name");
      assertTrue("name not set",
          (counterName != null && !counterName.isEmpty()));
      long value = counter.getLong("value");
      assertTrue("value  >= 0", value >= 0);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestHsWebServicesAttempts.java

Example 15: verifyAMTaskAttemptXML

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the required package/class
public void verifyAMTaskAttemptXML(Element element, TaskAttempt att,
    TaskType ttype) {
  verifyTaskAttemptGeneric(att, ttype,
      WebServicesTestUtils.getXmlString(element, "id"),
      WebServicesTestUtils.getXmlString(element, "state"),
      WebServicesTestUtils.getXmlString(element, "type"),
      WebServicesTestUtils.getXmlString(element, "rack"),
      WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
      WebServicesTestUtils.getXmlString(element, "diagnostics"),
      WebServicesTestUtils.getXmlString(element, "assignedContainerId"),
      WebServicesTestUtils.getXmlLong(element, "startTime"),
      WebServicesTestUtils.getXmlLong(element, "finishTime"),
      WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
      WebServicesTestUtils.getXmlFloat(element, "progress"));

  if (ttype == TaskType.REDUCE) {
    verifyReduceTaskAttemptGeneric(att,
        WebServicesTestUtils.getXmlLong(element, "shuffleFinishTime"),
        WebServicesTestUtils.getXmlLong(element, "mergeFinishTime"),
        WebServicesTestUtils.getXmlLong(element, "elapsedShuffleTime"),
        WebServicesTestUtils.getXmlLong(element, "elapsedMergeTime"),
        WebServicesTestUtils.getXmlLong(element, "elapsedReduceTime"));
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestAMWebServicesAttempts.java


Note: The org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors; consult the corresponding project's license before distributing or using it. Do not reproduce this article without permission.