

Java TypeConverter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.TypeConverter. If you are wondering what TypeConverter does, how to use it, or what real code that uses it looks like, the curated class examples below should help.


The TypeConverter class belongs to the org.apache.hadoop.mapreduce package. Fifteen code examples of the class are shown below, ordered roughly by popularity.
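
In both directions the conversions are static one-liners: TypeConverter.toYarn maps the classic org.apache.hadoop.mapreduce IDs (JobID, TaskID, TaskAttemptID) to the YARN v2 record types (JobId, TaskId, TaskAttemptId), and TypeConverter.fromYarn maps back (it can also derive a JobID from a YARN ApplicationId, as several examples below do). Before the examples, here is a minimal round-trip sketch; the job ID string is illustrative:

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;

public class TypeConverterRoundTrip {
  public static void main(String[] args) {
    // Parse a classic MapReduce job ID (the ID string is illustrative).
    JobID oldJobId = JobID.forName("job_1234567890000_0001");
    // Convert to the YARN v2 record type and back again.
    JobId yarnJobId = TypeConverter.toYarn(oldJobId);
    JobID roundTripped = TypeConverter.fromYarn(yarnJobId);
    // The textual form survives the round trip.
    System.out.println(oldJobId.toString().equals(roundTripped.toString())); // true
  }
}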

Example 1: createStubbedJob

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
private static StubbedJob createStubbedJob(Configuration conf,
    Dispatcher dispatcher, int numSplits, AppContext appContext) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  if (appContext == null) {
    appContext = mock(AppContext.class);
    when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
  }
  StubbedJob job = new StubbedJob(jobId,
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
      conf, dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
  dispatcher.register(JobEventType.class, job);
  EventHandler mockHandler = mock(EventHandler.class);
  dispatcher.register(TaskEventType.class, mockHandler);
  dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
      mockHandler);
  dispatcher.register(JobFinishEvent.Type.class, mockHandler);
  return job;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestJobImpl.java

Example 2: handle

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
@Override
public void handle(ContainerAllocatorEvent event) {
  ContainerId cId =
      ContainerId.newContainerId(getContext().getApplicationAttemptId(),
        containerCount++);
  NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT);
  Resource resource = Resource.newInstance(1234, 2, 2);
  ContainerTokenIdentifier containerTokenIdentifier =
      new ContainerTokenIdentifier(cId, nodeId.toString(), "user",
      resource, System.currentTimeMillis() + 10000, 42, 42,
      Priority.newInstance(0), 0);
  Token containerToken = newContainerToken(nodeId, "password".getBytes(),
        containerTokenIdentifier);
  Container container = Container.newInstance(cId, nodeId,
      NM_HOST + ":" + NM_HTTP_PORT, resource, null, containerToken);
  JobID id = TypeConverter.fromYarn(applicationId);
  JobId jobId = TypeConverter.toYarn(id);
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId, 
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.REDUCE,
      100)));
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId, 
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.MAP,
      100)));
  getContext().getEventHandler().handle(
      new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
          container, null));
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: MRApp.java

Example 3: testJobNamePercentEncoding

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
@Test
public void testJobNamePercentEncoding() throws IOException {
  JobIndexInfo info = new JobIndexInfo();
  JobID oldJobId = JobID.forName(JOB_ID);
  JobId jobId = TypeConverter.toYarn(oldJobId);
  info.setJobId(jobId);
  info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  info.setUser(USER_NAME);
  info.setJobName(JOB_NAME_WITH_DELIMITER);
  info.setFinishTime(Long.parseLong(FINISH_TIME));
  info.setNumMaps(Integer.parseInt(NUM_MAPS));
  info.setNumReduces(Integer.parseInt(NUM_REDUCES));
  info.setJobStatus(JOB_STATUS);
  info.setQueueName(QUEUE_NAME);
  info.setJobStartTime(Long.parseLong(JOB_START_TIME));

  String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
  Assert.assertTrue("Job name not encoded correctly into job history file",
      jobHistoryFile.contains(JOB_NAME_WITH_DELIMITER_ESCAPE));
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestFileNameIndexUtils.java

Example 4: testQueueNamePercentEncoding

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
@Test
public void testQueueNamePercentEncoding() throws IOException {
  JobIndexInfo info = new JobIndexInfo();
  JobID oldJobId = JobID.forName(JOB_ID);
  JobId jobId = TypeConverter.toYarn(oldJobId);
  info.setJobId(jobId);
  info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  info.setUser(USER_NAME);
  info.setJobName(JOB_NAME);
  info.setFinishTime(Long.parseLong(FINISH_TIME));
  info.setNumMaps(Integer.parseInt(NUM_MAPS));
  info.setNumReduces(Integer.parseInt(NUM_REDUCES));
  info.setJobStatus(JOB_STATUS);
  info.setQueueName(QUEUE_NAME_WITH_DELIMITER);
  info.setJobStartTime(Long.parseLong(JOB_START_TIME));

  String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
  Assert.assertTrue("Queue name not encoded correctly into job history file",
      jobHistoryFile.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestFileNameIndexUtils.java
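
Examples 3 and 4 exercise the same invariant: FileNameIndexUtils.getDoneFileName joins the index fields (job ID, submit time, user, job name, queue name, and so on) into a single file name with a fixed delimiter, so any field value that itself contains the delimiter must be percent-encoded before joining and decoded on parse. The following standalone sketch illustrates that escaping idea; the helper names and the "-" delimiter with "%2D" escape are assumptions for illustration, not the actual FileNameIndexUtils internals:

// Hedged sketch of delimiter percent-encoding for composite file names;
// helper names are illustrative, not Hadoop's real implementation.
public class DelimiterEscapeSketch {
  private static final String DELIMITER = "-";
  private static final String DELIMITER_ESCAPE = "%2D"; // '-' is 0x2D

  static String escape(String field) {
    // Escape '%' first so decoding stays unambiguous, then the delimiter.
    return field.replace("%", "%25").replace(DELIMITER, DELIMITER_ESCAPE);
  }

  static String unescape(String field) {
    return field.replace(DELIMITER_ESCAPE, DELIMITER).replace("%25", "%");
  }

  public static void main(String[] args) {
    String jobName = "word-count-v2"; // contains the delimiter
    String fileName = String.join(DELIMITER,
        "job_1234567890000_0001", escape(jobName), "default");
    System.out.println(fileName); // job_1234567890000_0001-word%2Dcount%2Dv2-default
    System.out.println(unescape(escape(jobName)).equals(jobName)); // true
  }
}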

Example 5: loadAllTasks

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: CompletedJob.java

Example 6: canCommit

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
/**
 * Child checking whether it can commit.
 * 
 * <br>
 * Commit is a two-phased protocol. First the attempt informs the
 * ApplicationMaster that it is
 * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
 * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
 * a legacy from the centralized commit protocol handling by the JobTracker.
 */
@Override
public boolean canCommit(TaskAttemptID taskAttemptID) throws IOException {
  LOG.info("Commit go/no-go request from " + taskAttemptID.toString());
  // An attempt is asking if it can commit its output. This can be decided
  // only by the task which is managing the multiple attempts. So redirect the
  // request there.
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);

  taskHeartbeatHandler.progressing(attemptID);

  // tell task to retry later if AM has not heard from RM within the commit
  // window to help avoid double-committing in a split-brain situation
  long now = context.getClock().getTime();
  if (now - rmHeartbeatHandler.getLastHeartbeatTime() > commitWindowMs) {
    return false;
  }

  Job job = context.getJob(attemptID.getTaskId().getJobId());
  Task task = job.getTask(attemptID.getTaskId());
  return task.canCommit(attemptID);
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TaskAttemptListenerImpl.java
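
For context, the child-task side of this two-phase handshake talks to the method above through TaskUmbilicalProtocol: it first calls commitPending and then polls canCommit until the ApplicationMaster approves the commit. Below is a hedged sketch of that polling loop, with the retries and progress reporting of the real Task class stripped down:

import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;

// Simplified sketch of the task-side commit handshake; the real logic lives
// in Task#done()/Task#commit() and includes retries and progress updates.
class CommitPollSketch {
  static void awaitCommitApproval(TaskUmbilicalProtocol umbilical,
      TaskAttemptID attemptId) throws Exception {
    // Phase 1 happened earlier: umbilical.commitPending(attemptId, status)
    // told the ApplicationMaster this attempt is ready to commit.
    // Phase 2: poll until the Task managing all attempts approves us.
    while (!umbilical.canCommit(attemptId)) {
      Thread.sleep(1000); // back off between polls
    }
    // Only now is it safe to run OutputCommitter#commitTask().
  }
}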

Example 7: commitPending

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
/**
 * TaskAttempt is reporting that it is in commit_pending and it is waiting for
 * the commit response.
 * 
 * <br>
 * Commit is a two-phased protocol. First the attempt informs the
 * ApplicationMaster that it is
 * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
 * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
 * a legacy from the centralized commit protocol handling by the JobTracker.
 */
@Override
public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus)
        throws IOException, InterruptedException {
  LOG.info("Commit-pending state update from " + taskAttemptID.toString());
  // An attempt is asking if it can commit its output. This can be decided
  // only by the task which is managing the multiple attempts. So redirect the
  // request there.
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);

  taskHeartbeatHandler.progressing(attemptID);
  //Ignorable TaskStatus? - since a task will send a LastStatusUpdate
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, 
          TaskAttemptEventType.TA_COMMIT_PENDING));
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TaskAttemptListenerImpl.java

Example 8: reportDiagnosticInfo

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID,
    String diagnosticInfo) throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TaskAttemptListenerImpl.java

Example 9: serviceInit

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  commitThreadCancelTimeoutMs = conf.getInt(
      MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS);
  commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
  try {
    fs = FileSystem.get(conf);
    JobID id = TypeConverter.fromYarn(context.getApplicationID());
    JobId jobId = TypeConverter.toYarn(id);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
    endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: CommitterEventHandler.java

Example 10: createTaskAttemptUnsuccessfulCompletionEvent

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
private static
    TaskAttemptUnsuccessfulCompletionEvent
    createTaskAttemptUnsuccessfulCompletionEvent(TaskAttemptImpl taskAttempt,
        TaskAttemptStateInternal attemptState) {
  TaskAttemptUnsuccessfulCompletionEvent tauce =
      new TaskAttemptUnsuccessfulCompletionEvent(
          TypeConverter.fromYarn(taskAttempt.attemptId),
          TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId()
              .getTaskType()), attemptState.toString(),
          taskAttempt.finishTime,
          taskAttempt.container == null ? "UNKNOWN"
              : taskAttempt.container.getNodeId().getHost(),
          taskAttempt.container == null ? -1
              : taskAttempt.container.getNodeId().getPort(),
          taskAttempt.nodeRackName == null ? "UNKNOWN"
              : taskAttempt.nodeRackName,
          StringUtils.join(
              LINE_SEPARATOR, taskAttempt.getDiagnostics()),
          taskAttempt.getCounters(),
          taskAttempt.getProgressSplitBlock().burst());
  return tauce;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TaskAttemptImpl.java

Example 11: sendLaunchedEvents

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TaskAttemptImpl.java

Example 12: createTaskFailedEvent

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
private static TaskFailedEvent createTaskFailedEvent(TaskImpl task,
    List<String> diag, TaskStateInternal taskState, TaskAttemptId taId) {
  StringBuilder errorSb = new StringBuilder();
  if (diag != null) {
    for (String d : diag) {
      errorSb.append(", ").append(d);
    }
  }
  TaskFailedEvent taskFailedEvent = new TaskFailedEvent(
      TypeConverter.fromYarn(task.taskId),
      // Hack: getFinishTime needs isFinished to be true, and that doesn't
      // happen until after the transition.
      task.getFinishTime(taId),
      TypeConverter.fromYarn(task.getType()),
      errorSb.toString(),
      taskState.toString(),
      taId == null ? null : TypeConverter.fromYarn(taId),
      task.getCounters());
  return taskFailedEvent;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TaskImpl.java

Example 13: getPreviousJobHistoryFileStream

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
public static FSDataInputStream getPreviousJobHistoryFileStream(
    Configuration conf, ApplicationAttemptId applicationAttemptId)
    throws IOException {
  String jobId =
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId())
        .toString();
  String jobhistoryDir =
      JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
  Path histDirPath =
      FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
  FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
  // Read the history file written by the previous application attempt.
  Path historyFile =
      fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(histDirPath,
        jobId, (applicationAttemptId.getAttemptId() - 1)));
  LOG.info("History file is at " + historyFile);
  return fc.open(historyFile);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: JobHistoryCopyService.java

Example 14: writeBadOutput

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
      TypeConverter.fromYarn(attempt.getID()));
 
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key2, val2);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val2);
    theRecordWriter.write(nullWritable, val1);
    theRecordWriter.write(key1, nullWritable);
    theRecordWriter.write(key2, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key1, val1);
  } finally {
    theRecordWriter.close(tContext);
  }
  
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestRecovery.java

Example 15: writeOutput

import org.apache.hadoop.mapreduce.TypeConverter; // import the required package/class
private void writeOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
      TypeConverter.fromYarn(attempt.getID()));
  
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(tContext);
  }
  
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestRecovery.java


Note: The org.apache.hadoop.mapreduce.TypeConverter examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from community-maintained open-source projects; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Please do not republish without permission.