

Java TaskType.REDUCE Enum Constant Code Examples

This article collects typical usage examples of the Java enum constant org.apache.hadoop.mapreduce.TaskType.REDUCE. If you are wondering what TaskType.REDUCE is for, how to use it, or want to see it in practice, the curated code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.mapreduce.TaskType.


Below are 13 code examples of TaskType.REDUCE, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
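
Before the examples, here is a minimal orientation sketch (written for this article, not taken from any of the projects below) of how TaskType.REDUCE is typically combined with JobID, TaskID, and TaskAttemptID to identify a reduce task:

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class ReduceIdSketch {
  public static void main(String[] args) {
    JobID jobId = new JobID("20240101", 1);                 // jobtracker id + job number (illustrative values)
    TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);  // the first reduce task of the job
    TaskAttemptID attemptId = new TaskAttemptID(taskId, 0); // its first attempt
    System.out.println(attemptId);                          // attempt_20240101_0001_r_000000_0
  }
}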

Example 1: reduce

public void reduce(Text key, Iterable<IntWritable> values, 
                       Context context) throws IOException, InterruptedException {
  // Make one reducer slower for speculative execution
  TaskAttemptID taid = context.getTaskAttemptID();
  long sleepTime = 100;
  Configuration conf = context.getConfiguration();
  boolean test_speculate_reduce =
            conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);

  // IF TESTING REDUCE SPECULATIVE EXECUTION:
  //   Make the "*_r_000000_0" attempt take much longer than the others.
  //   When speculative execution is enabled, this should cause the attempt
  //   to be killed and restarted. At that point, the attempt ID will be
  //   "*_r_000000_1", so sleepTime will still remain 100ms.
  if ((taid.getTaskType() == TaskType.REDUCE) && test_speculate_reduce
      && (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
    sleepTime = 10000;
  }
  try {
    Thread.sleep(sleepTime);
  } catch (InterruptedException ie) {
    // Ignore
  }
  context.write(key, new IntWritable(0));
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: TestSpeculativeExecution.java
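
The test above only slows down the first attempt; whether a speculative duplicate is actually launched depends on MRJobConfig.REDUCE_SPECULATIVE being enabled. As a hedged sketch of the driver-side setup such a test assumes (the job name and surrounding code are illustrative, not from TestSpeculativeExecution.java):

Configuration conf = new Configuration();
// MRJobConfig.REDUCE_SPECULATIVE resolves to "mapreduce.reduce.speculative".
conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, true);
Job job = Job.getInstance(conf, "speculation-demo"); // hypothetical job name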

Example 2: testTaskAttemptFinishedEvent

/**
 * Test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {

  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());

  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RAKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);

}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: TestEvents.java

Example 3: testTaskCompletionEvent

/**
 * Test the deprecated methods of TaskCompletionEvent.
 */
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testTaskCompletionEvent() {
  TaskAttemptID taid = new TaskAttemptID("001", 1, TaskType.REDUCE, 2, 3);
  TaskCompletionEvent template = new TaskCompletionEvent(12, taid, 13, true,
      Status.SUCCEEDED, "httptracker");
  TaskCompletionEvent testEl = TaskCompletionEvent.downgrade(template);
  testEl.setTaskAttemptId(taid);
  testEl.setTaskTrackerHttp("httpTracker");

  testEl.setTaskId("attempt_001_0001_m_000002_04");
  assertEquals("attempt_001_0001_m_000002_4",testEl.getTaskId());

  testEl.setTaskStatus(Status.OBSOLETE);
  assertEquals(Status.OBSOLETE.toString(), testEl.getStatus().toString());

  testEl.setTaskRunTime(20);
  assertEquals(testEl.getTaskRunTime(), 20);
  testEl.setEventId(16);
  assertEquals(testEl.getEventId(), 16);

}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: TestOldMethodsJobID.java

Example 4: testCompletedTaskAttempt

/**
 * Test some methods of CompletedTaskAttempt.
 */
@Test (timeout=5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: TestCompletedTask.java

Example 5: testTaskUpdated

/**
 * Simple test of TaskUpdatedEvent and TaskUpdated.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskUpdated() throws Exception {
  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskUpdatedEvent test = new TaskUpdatedEvent(tid, 1234L);
  assertEquals(test.getTaskId().toString(), tid.toString());
  assertEquals(test.getFinishTime(), 1234L);

}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: TestEvents.java

Example 6: run

public void run() {
  try {
    TaskAttemptID reduceId = new TaskAttemptID(new TaskID(
        jobId, TaskType.REDUCE, taskId), 0);
    LOG.info("Starting task: " + reduceId);

    ReduceTask reduce = new ReduceTask(systemJobFile.toString(),
        reduceId, taskId, mapIds.size(), 1);
    reduce.setUser(UserGroupInformation.getCurrentUser().
        getShortUserName());
    setupChildMapredLocalDirs(reduce, localConf);
    reduce.setLocalMapFiles(mapOutputFiles);

    if (!Job.this.isInterrupted()) {
      reduce.setJobFile(localJobFile.toString());
      localConf.setUser(reduce.getUser());
      reduce.localizeConfiguration(localConf);
      reduce.setConf(localConf);
      try {
        reduce_tasks.getAndIncrement();
        myMetrics.launchReduce(reduce.getTaskID());
        reduce.run(localConf, Job.this);
        myMetrics.completeReduce(reduce.getTaskID());
      } finally {
        reduce_tasks.getAndDecrement();
      }

      LOG.info("Finishing task: " + reduceId);
    } else {
      throw new InterruptedException();
    }
  } catch (Throwable t) {
    // store this to be rethrown in the initial thread context.
    this.storedException = t;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 36, Source: LocalJobRunner.java

Example 7: testTipFailed

@SuppressWarnings("rawtypes")
@Test
public void testTipFailed() throws Exception {
  JobConf job = new JobConf();
  job.setNumMapTasks(2);

  TaskStatus status = new TaskStatus() {
    @Override
    public boolean getIsMap() {
      return false;
    }

    @Override
    public void addFetchFailedMap(TaskAttemptID mapTaskId) {
    }
  };
  Progress progress = new Progress();

  TaskAttemptID reduceId = new TaskAttemptID("314159", 0, TaskType.REDUCE,
      0, 0);
  ShuffleSchedulerImpl scheduler = new ShuffleSchedulerImpl(job, status,
      reduceId, null, progress, null, null, null);

  JobID jobId = new JobID();
  TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 1);
  scheduler.tipFailed(taskId1);

  Assert.assertEquals("Progress should be 0.5", 0.5f, progress.getProgress(),
      0.0f);
  Assert.assertFalse(scheduler.waitUntilDone(1));

  TaskID taskId0 = new TaskID(jobId, TaskType.REDUCE, 0);
  scheduler.tipFailed(taskId0);
  Assert.assertEquals("Progress should be 1.0", 1.0f, progress.getProgress(),
      0.0f);
  Assert.assertTrue(scheduler.waitUntilDone(1));
}
 
Developer ID: naver, Project: hadoop, Lines: 37, Source: TestShuffleScheduler.java

Example 8: processEventForJobSummary

public void processEventForJobSummary(HistoryEvent event, JobSummary summary, 
    JobId jobId) {
  // context.getJob could be used for some of this info as well.
  switch (event.getEventType()) {
  case JOB_SUBMITTED:
    JobSubmittedEvent jse = (JobSubmittedEvent) event;
    summary.setUser(jse.getUserName());
    summary.setQueue(jse.getJobQueueName());
    summary.setJobSubmitTime(jse.getSubmitTime());
    summary.setJobName(jse.getJobName());
    break;
  case NORMALIZED_RESOURCE:
    NormalizedResourceEvent normalizedResourceEvent = 
          (NormalizedResourceEvent) event;
    if (normalizedResourceEvent.getTaskType() == TaskType.MAP) {
      summary.setResourcesPerMap(normalizedResourceEvent.getMemory());
    } else if (normalizedResourceEvent.getTaskType() == TaskType.REDUCE) {
      summary.setResourcesPerReduce(normalizedResourceEvent.getMemory());
    }
    break;  
  case JOB_INITED:
    JobInitedEvent jie = (JobInitedEvent) event;
    summary.setJobLaunchTime(jie.getLaunchTime());
    break;
  case MAP_ATTEMPT_STARTED:
    TaskAttemptStartedEvent mtase = (TaskAttemptStartedEvent) event;
    if (summary.getFirstMapTaskLaunchTime() == 0)
      summary.setFirstMapTaskLaunchTime(mtase.getStartTime());
    break;
  case REDUCE_ATTEMPT_STARTED:
    TaskAttemptStartedEvent rtase = (TaskAttemptStartedEvent) event;
    if (summary.getFirstReduceTaskLaunchTime() == 0)
      summary.setFirstReduceTaskLaunchTime(rtase.getStartTime());
    break;
  case JOB_FINISHED:
    JobFinishedEvent jfe = (JobFinishedEvent) event;
    summary.setJobFinishTime(jfe.getFinishTime());
    summary.setNumFinishedMaps(jfe.getFinishedMaps());
    summary.setNumFailedMaps(jfe.getFailedMaps());
    summary.setNumFinishedReduces(jfe.getFinishedReduces());
    summary.setNumFailedReduces(jfe.getFailedReduces());
    if (summary.getJobStatus() == null)
      summary
          .setJobStatus(org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED
              .toString());
    // TODO JOB_FINISHED does not have state. Effectively job history does not
    // have state about the finished job.
    setSummarySlotSeconds(summary, jfe.getTotalCounters());
    break;
  case JOB_FAILED:
  case JOB_KILLED:
    JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event;
    summary.setJobStatus(juce.getStatus());
    summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps());
    summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces());
    summary.setJobFinishTime(juce.getFinishTime());
    setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
    break;
  default:
    break;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 62, Source: JobHistoryEventHandler.java

Example 9: MergeQueue

public MergeQueue(Configuration conf, FileSystem fs, 
                  Path[] inputs, boolean deleteInputs, 
                  CompressionCodec codec, RawComparator<K> comparator,
                  Progressable reporter) 
throws IOException {
  this(conf, fs, inputs, deleteInputs, codec, comparator, reporter, null,
      TaskType.REDUCE);
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: Merger.java

Example 10: TaskID

public TaskID() {
  super(new JobID(), TaskType.REDUCE, 0);
}
 
Developer ID: naver, Project: hadoop, Lines: 3, Source: TaskID.java
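
The no-argument constructor exists so a TaskID can be instantiated empty for Writable deserialization; TaskType.REDUCE and task number 0 are only placeholder defaults that readFields() overwrites. A small sketch of that round trip (in-memory streams; assumed usage, not code from TaskID.java):

// Serialize a reduce TaskID, then read it back via the Writable interface.
TaskID original = new TaskID(new JobID("20240101", 1), TaskType.REDUCE, 7);
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
original.write(new DataOutputStream(buffer));

TaskID copy = new TaskID(); // placeholder state until readFields() runs
copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
// copy.equals(original) now holds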

Example 11: testPartialOutputCleanup

@Test
public void testPartialOutputCleanup()
    throws FileNotFoundException, IllegalArgumentException, IOException {

  Configuration conf = new Configuration(false);
  conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
  TaskAttemptID tid0 =
    new TaskAttemptID("1363718006656", 1, TaskType.REDUCE, 14, 3);

  Path p = spy(new Path("/user/hadoop/out"));
  Path a = new Path("hdfs://user/hadoop/out");
  Path p0 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000014_0");
  Path p1 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000014_1");
  Path p2 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000013_0");
  // (p3 does not exist)
  Path p3 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000014_2");

  FileStatus[] fsa = new FileStatus[3];
  fsa[0] = new FileStatus();
  fsa[0].setPath(p0);
  fsa[1] = new FileStatus();
  fsa[1].setPath(p1);
  fsa[2] = new FileStatus();
  fsa[2].setPath(p2);

  final FileSystem fs = mock(FileSystem.class);
  when(fs.exists(eq(p0))).thenReturn(true);
  when(fs.exists(eq(p1))).thenReturn(true);
  when(fs.exists(eq(p2))).thenReturn(true);
  when(fs.exists(eq(p3))).thenReturn(false);
  when(fs.delete(eq(p0), eq(true))).thenReturn(true);
  when(fs.delete(eq(p1), eq(true))).thenReturn(true);
  doReturn(fs).when(p).getFileSystem(any(Configuration.class));
  when(fs.makeQualified(eq(p))).thenReturn(a);

  TaskAttemptContext context = mock(TaskAttemptContext.class);
  when(context.getTaskAttemptID()).thenReturn(tid0);
  when(context.getConfiguration()).thenReturn(conf);

  PartialFileOutputCommitter foc = new TestPFOC(p, context, fs);

  foc.cleanUpPartialOutputForTask(context);
  verify(fs).delete(eq(p0), eq(true));
  verify(fs).delete(eq(p1), eq(true));
  verify(fs, never()).delete(eq(p3), eq(true));
  verify(fs, never()).delete(eq(p2), eq(true));
}
 
Developer ID: naver, Project: hadoop, Lines: 47, Source: TestPreemptableFileOutputCommitter.java
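
The attempt directories this test creates under _temporary/1/ are simply the string form of a TaskAttemptID, which is how partial output is matched to attempts. A quick illustrative sketch (not from the original test):

TaskAttemptID tid =
    new TaskAttemptID("1363718006656", 1, TaskType.REDUCE, 14, 3);
// tid.toString() is "attempt_1363718006656_0001_r_000014_3", matching the
// naming of the directories scanned under _temporary/<appAttemptId>/.
String attemptDir = "_temporary/1/" + tid;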

Example 12: testConsecutiveFetch

@Test
public void testConsecutiveFetch()
    throws IOException, InterruptedException {
  final int MAX_EVENTS_TO_FETCH = 100;
  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 1);

  TaskUmbilicalProtocol umbilical = mock(TaskUmbilicalProtocol.class);
  when(umbilical.getMapCompletionEvents(any(JobID.class),
      anyInt(), anyInt(), any(TaskAttemptID.class)))
    .thenReturn(getMockedCompletionEventsUpdate(0, 0));
  when(umbilical.getMapCompletionEvents(any(JobID.class),
      eq(0), eq(MAX_EVENTS_TO_FETCH), eq(tid)))
    .thenReturn(getMockedCompletionEventsUpdate(0, MAX_EVENTS_TO_FETCH));
  when(umbilical.getMapCompletionEvents(any(JobID.class),
      eq(MAX_EVENTS_TO_FETCH), eq(MAX_EVENTS_TO_FETCH), eq(tid)))
    .thenReturn(getMockedCompletionEventsUpdate(MAX_EVENTS_TO_FETCH,
        MAX_EVENTS_TO_FETCH));
  when(umbilical.getMapCompletionEvents(any(JobID.class),
      eq(MAX_EVENTS_TO_FETCH*2), eq(MAX_EVENTS_TO_FETCH), eq(tid)))
    .thenReturn(getMockedCompletionEventsUpdate(MAX_EVENTS_TO_FETCH*2, 3));

  @SuppressWarnings("unchecked")
  ShuffleScheduler<String,String> scheduler =
    mock(ShuffleScheduler.class);
  ExceptionReporter reporter = mock(ExceptionReporter.class);

  EventFetcherForTest<String,String> ef =
      new EventFetcherForTest<String,String>(tid, umbilical, scheduler,
          reporter, MAX_EVENTS_TO_FETCH);
  ef.getMapCompletionEvents();

  verify(reporter, never()).reportException(any(Throwable.class));
  InOrder inOrder = inOrder(umbilical);
  inOrder.verify(umbilical).getMapCompletionEvents(any(JobID.class),
      eq(0), eq(MAX_EVENTS_TO_FETCH), eq(tid));
  inOrder.verify(umbilical).getMapCompletionEvents(any(JobID.class),
      eq(MAX_EVENTS_TO_FETCH), eq(MAX_EVENTS_TO_FETCH), eq(tid));
  inOrder.verify(umbilical).getMapCompletionEvents(any(JobID.class),
      eq(MAX_EVENTS_TO_FETCH*2), eq(MAX_EVENTS_TO_FETCH), eq(tid));
  verify(scheduler, times(MAX_EVENTS_TO_FETCH*2 + 3)).resolve(
      any(TaskCompletionEvent.class));
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: TestEventFetcher.java

Example 13: TaskAttemptID

/**
 * Constructs a TaskAttemptID object from the given parts.
 * @param jtIdentifier jobTracker identifier
 * @param jobId job number
 * @param isMap whether the tip is a map
 * @param taskId taskId number
 * @param id the task attempt number
 * @deprecated Use {@link #TaskAttemptID(String, int, TaskType, int, int)}.
 */
@Deprecated
public TaskAttemptID(String jtIdentifier, int jobId, boolean isMap,
    int taskId, int id) {
  this(jtIdentifier, jobId, isMap ? TaskType.MAP : TaskType.REDUCE, taskId,
      id);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: TaskAttemptID.java
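
Because the boolean form is deprecated, new code should name the task type explicitly via the constructor referenced in the @deprecated tag; an illustrative call (argument values are made up for this article):

// Preferred, non-deprecated form: pass the TaskType explicitly.
TaskAttemptID reduceAttempt =
    new TaskAttemptID("200707121733", 1, TaskType.REDUCE, 4, 0);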


Note: The org.apache.hadoop.mapreduce.TaskType.REDUCE examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and redistribution or use must follow the corresponding project's license. Do not republish without permission.