

Java TaskType.MAP Code Examples

This article collects typical usage examples of org.apache.hadoop.mapreduce.TaskType.MAP, the enum constant that identifies map tasks in Hadoop MapReduce. If you have been wondering what TaskType.MAP is for, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the enclosing enum, org.apache.hadoop.mapreduce.TaskType.


The sections below present 15 code examples of TaskType.MAP, sorted by popularity by default.
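
Before the examples, here is a minimal self-contained sketch (not taken from any project below; the class name and job identifier are arbitrary) of the two patterns that recur throughout this page: constructing task IDs with TaskType.MAP, and comparing a task's type against it to branch between map-side and reduce-side behavior.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskTypeMapDemo {
  public static void main(String[] args) {
    // Build the ID of the first map task of a job. The "m" in the string
    // form ("task_..._m_000000") is derived from TaskType.MAP; see Example 13.
    JobID jobId = new JobID("1234567890123", 1);            // arbitrary identifiers
    TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
    TaskAttemptID attemptId = new TaskAttemptID(taskId, 0); // first attempt

    // The constant is most often used in comparisons like this one, to
    // branch between map-side and reduce-side behavior (see Examples 1-5).
    if (attemptId.getTaskType() == TaskType.MAP) {
      System.out.println("Map attempt: " + attemptId);
    }
  }
}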

Example 1: map (slows down one map attempt to exercise speculative execution)

public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
  // Make one mapper slower for speculative execution
  TaskAttemptID taid = context.getTaskAttemptID();
  long sleepTime = 100;
  Configuration conf = context.getConfiguration();
  boolean test_speculate_map =
          conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false);

  // IF TESTING MAPPER SPECULATIVE EXECUTION:
  //   Make the "*_m_000000_0" attempt take much longer than the others.
  //   When speculative execution is enabled, this should cause the attempt
  //   to be killed and restarted. At that point, the attempt ID will be
  //   "*_m_000000_1", so sleepTime will still remain 100ms.
  if ( (taid.getTaskType() == TaskType.MAP) && test_speculate_map
        && (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
    sleepTime = 10000;
  }
  try {
    Thread.sleep(sleepTime);
  } catch (InterruptedException ie) {
    // Ignore
  }
  context.write(value, new IntWritable(1));
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestSpeculativeExecution.java
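
A note on Example 1: the mapper only stalls when MRJobConfig.MAP_SPECULATIVE is set in the job configuration. As a rough sketch of the driver side (assumed for illustration; the class and job name are made up, and this is not part of the test above), enabling map-task speculation is a single boolean flag:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class SpeculationConfigDemo {
  public static Job newSpeculativeJob() throws Exception {
    Configuration conf = new Configuration();
    // Ask the framework to schedule backup attempts for straggling map
    // tasks; the mapper in Example 1 reads this same flag to decide
    // whether to stall attempt 0.
    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
    return Job.getInstance(conf, "speculation-demo");
  }
}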

Example 2: createSplits (assigns dynamic input chunks to map task IDs)

private List<InputSplit> createSplits(JobContext jobContext, List<DynamicInputChunk> chunks) throws IOException {
  int numMaps = getNumMapTasks(jobContext.getConfiguration());

  final int nSplits = Math.min(numMaps, chunks.size());
  List<InputSplit> splits = new ArrayList<>(nSplits);

  for (int i = 0; i < nSplits; ++i) {
    TaskID taskId = new TaskID(jobContext.getJobID(), TaskType.MAP, i);
    chunks.get(i).assignTo(taskId);
    splits.add(new FileSplit(chunks.get(i).getPath(), 0,
        // Setting non-zero length for FileSplit size, to avoid a possible
        // future when 0-sized file-splits are considered "empty" and skipped
        // over.
        getMinRecordsPerChunk(jobContext.getConfiguration()), null));
  }
  ConfigurationUtil.publish(jobContext.getConfiguration(), CONF_LABEL_NUM_SPLITS, splits.size());
  return splits;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 18, Source: DynamicInputFormat.java

Example 3: runStreamJob (runs a streaming job and validates its status, output, and counters)

void runStreamJob(TaskType type, boolean isEmptyInput) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(
      mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS), map, reduce), mayExit);
  int returnValue = job.go();
  assertEquals(0, returnValue);

  // If input to reducer is empty, dummy reporter(which ignores all
  // reporting lines) is set for MRErrorThread in waitOutputThreads(). So
  // expectedCounterValue is 0 for empty-input-to-reducer case.
  // Output of reducer is also empty for empty-input-to-reducer case.
  int expectedCounterValue = 0;
  if (type == TaskType.MAP || !isEmptyInput) {
    validateTaskStatus(job, type);
    // output is from "print STDOUT" statements in perl script
    validateJobOutput(job.getConf());
    expectedCounterValue = 2;
  }
  validateUserCounter(job, expectedCounterValue);
  validateTaskStderr(job, type);

  deleteOutDir(fs);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestStreamingStatus.java

Example 4: createDummyTask (builds a map or reduce task for commit testing)

private Task createDummyTask(TaskType type)
    throws IOException, ClassNotFoundException, InterruptedException {
  JobConf conf = new JobConf();
  conf.setOutputCommitter(CommitterThatAlwaysRequiresCommit.class);
  Path outDir = new Path(rootDir, "output"); 
  FileOutputFormat.setOutputPath(conf, outDir);
  JobID jobId = JobID.forName("job_201002121132_0001");
  Task testTask;
  if (type == TaskType.MAP) {
    testTask = new MapTask();
  } else {
    testTask = new ReduceTask();
  }
  testTask.setConf(conf);
  testTask.initialize(conf, jobId, Reporter.NULL, false);
  return testTask;
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestTaskCommit.java

Example 5: validateTaskStatus (checks the final phase reported for a map or reduce task)

void validateTaskStatus(StreamJob job, TaskType type) throws IOException {
  // Map Task has 2 phases: map, sort
  // Reduce Task has 3 phases: copy, sort, reduce
  String finalPhaseInTask;
  TaskReport[] reports;
  if (type == TaskType.MAP) {
    reports = job.jc_.getMapTaskReports(job.jobId_);
    finalPhaseInTask = "sort";
  } else {// reduce task
    reports = job.jc_.getReduceTaskReports(job.jobId_);
    finalPhaseInTask = "reduce";
  }
  assertEquals(1, reports.length);
  assertEquals(expectedStatus + " > " + finalPhaseInTask,
      reports[0].getState());
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestStreamingStatus.java

Example 6: testReinit (verifies CombineFileRecordReader re-initializes its child reader)

@Test
public void testReinit() throws Exception {
  // Test that a split containing multiple files works correctly,
  // with the child RecordReader getting its initialize() method
  // called a second time.
  TaskAttemptID taskId = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
  Configuration conf = new Configuration();
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, taskId);

  // This will create a CombineFileRecordReader that itself contains a
  // DummyRecordReader.
  InputFormat inputFormat = new ChildRRInputFormat();

  Path [] files = { new Path("file1"), new Path("file2") };
  long [] lengths = { 1, 1 };

  CombineFileSplit split = new CombineFileSplit(files, lengths);
  RecordReader rr = inputFormat.createRecordReader(split, context);
  assertTrue("Unexpected RR type!", rr instanceof CombineFileRecordReader);

  // first initialize() call comes from MapTask. We'll do it here.
  rr.initialize(split, context);

  // First value is first filename.
  assertTrue(rr.nextKeyValue());
  assertEquals("file1", rr.getCurrentValue().toString());

  // The inner RR will return false, because it only emits one (k, v) pair.
  // But there's another sub-split to process. This returns true to us.
  assertTrue(rr.nextKeyValue());
  
  // And the 2nd rr will have its initialize method called correctly.
  assertEquals("file2", rr.getCurrentValue().toString());
  
  // But after both child RR's have returned their singleton (k, v), this
  // should also return false.
  assertFalse(rr.nextKeyValue());
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestCombineFileInputFormat.java

Example 7: createOutputCommitter (chooses between the new- and old-API OutputCommitter)

private org.apache.hadoop.mapreduce.OutputCommitter createOutputCommitter(
    boolean newApiCommitter, JobID jobId, Configuration conf) throws Exception {
  org.apache.hadoop.mapreduce.OutputCommitter committer = null;

  LOG.info("OutputCommitter set in config "
      + conf.get("mapred.output.committer.class"));

  if (newApiCommitter) {
    org.apache.hadoop.mapreduce.TaskID taskId =
        new org.apache.hadoop.mapreduce.TaskID(jobId, TaskType.MAP, 0);
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID =
        new org.apache.hadoop.mapreduce.TaskAttemptID(taskId, 0);
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = 
        new TaskAttemptContextImpl(conf, taskAttemptID);
    OutputFormat outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), conf);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = ReflectionUtils.newInstance(conf.getClass(
        "mapred.output.committer.class", FileOutputCommitter.class,
        org.apache.hadoop.mapred.OutputCommitter.class), conf);
  }
  LOG.info("OutputCommitter is " + committer.getClass().getName());
  return committer;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: LocalJobRunner.java

Example 8: getMockTaskAttemptInfo (mocks a TaskAttemptInfo for a map attempt)

private TaskAttemptInfo getMockTaskAttemptInfo(TaskAttemptID tai,
    TaskAttemptState tas) {

  ContainerId ci = mock(ContainerId.class);
  Counters counters = mock(Counters.class);
  TaskType tt = TaskType.MAP;

  long finishTime = System.currentTimeMillis();

  TaskAttemptInfo mockTAinfo = mock(TaskAttemptInfo.class);

  when(mockTAinfo.getAttemptId()).thenReturn(tai);
  when(mockTAinfo.getContainerId()).thenReturn(ci);
  when(mockTAinfo.getCounters()).thenReturn(counters);
  when(mockTAinfo.getError()).thenReturn("");
  when(mockTAinfo.getFinishTime()).thenReturn(finishTime);
  when(mockTAinfo.getHostname()).thenReturn("localhost");
  when(mockTAinfo.getHttpPort()).thenReturn(23);
  when(mockTAinfo.getMapFinishTime()).thenReturn(finishTime - 1000L);
  when(mockTAinfo.getPort()).thenReturn(24);
  when(mockTAinfo.getRackname()).thenReturn("defaultRack");
  when(mockTAinfo.getShuffleFinishTime()).thenReturn(finishTime - 2000L);
  when(mockTAinfo.getShufflePort()).thenReturn(25);
  when(mockTAinfo.getSortFinishTime()).thenReturn(finishTime - 3000L);
  when(mockTAinfo.getStartTime()).thenReturn(finishTime - 10000L);
  when(mockTAinfo.getState()).thenReturn("task in progress");
  when(mockTAinfo.getTaskStatus()).thenReturn(tas.toString());
  when(mockTAinfo.getTaskType()).thenReturn(tt);
  when(mockTAinfo.getTrackerName()).thenReturn("TrackerName");
  return mockTAinfo;
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestRecovery.java

Example 9: run (sets up and runs a MapTask under LocalJobRunner)

public void run() {
  try {
    TaskAttemptID mapId = new TaskAttemptID(new TaskID(
        jobId, TaskType.MAP, taskId), 0);
    LOG.info("Starting task: " + mapId);
    mapIds.add(mapId);
    MapTask map = new MapTask(systemJobFile.toString(), mapId, taskId,
      info.getSplitIndex(), 1);
    map.setUser(UserGroupInformation.getCurrentUser().
        getShortUserName());
    setupChildMapredLocalDirs(map, localConf);

    MapOutputFile mapOutput = new MROutputFiles();
    mapOutput.setConf(localConf);
    mapOutputFiles.put(mapId, mapOutput);

    map.setJobFile(localJobFile.toString());
    localConf.setUser(map.getUser());
    map.localizeConfiguration(localConf);
    map.setConf(localConf);
    try {
      map_tasks.getAndIncrement();
      myMetrics.launchMap(mapId);
      map.run(localConf, Job.this);
      myMetrics.completeMap(mapId);
    } finally {
      map_tasks.getAndDecrement();
    }

    LOG.info("Finishing task: " + mapId);
  } catch (Throwable e) {
    this.storedException = e;
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: LocalJobRunner.java

Example 10: MergeQueue (constructor; map tasks count the final merge toward progress)

public MergeQueue(Configuration conf, FileSystem fs, 
                  Path[] inputs, boolean deleteInputs, 
                  CompressionCodec codec, RawComparator<K> comparator,
                  Progressable reporter, 
                  Counters.Counter mergedMapOutputsCounter,
                  TaskType taskType) 
throws IOException {
  this.conf = conf;
  this.fs = fs;
  this.codec = codec;
  this.comparator = comparator;
  this.reporter = reporter;
  
  if (taskType == TaskType.MAP) {
    considerFinalMergeForProgress();
  }
  
  for (Path file : inputs) {
    LOG.debug("MergeQ: adding: " + file);
    segments.add(new Segment<K, V>(conf, fs, file, codec, !deleteInputs, 
                                   (file.toString().endsWith(
                                       Task.MERGED_OUTPUT_PREFIX) ? 
                                    null : mergedMapOutputsCounter)));
  }
  
  // Sort segments on file-lengths
  Collections.sort(segments, segmentComparator); 
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: Merger.java

Example 11: getEventType (maps a map attempt to MAP_ATTEMPT_FINISHED)

/** Get the event type */
public EventType getEventType() {
  // Note that the task type can be setup/map/reduce/cleanup but the 
  // attempt-type can only be map/reduce.
  return getTaskId().getTaskType() == TaskType.MAP 
         ? EventType.MAP_ATTEMPT_FINISHED
         : EventType.REDUCE_ATTEMPT_FINISHED;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TaskAttemptFinishedEvent.java

Example 12: getEventType (maps a map attempt to MAP_ATTEMPT_STARTED)

/** Get the event type */
public EventType getEventType() {
  // Note that the task type can be setup/map/reduce/cleanup but the 
  // attempt-type can only be map/reduce.
  return getTaskId().getTaskType() == TaskType.MAP
         ? EventType.MAP_ATTEMPT_STARTED 
         : EventType.REDUCE_ATTEMPT_STARTED;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TaskAttemptStartedEvent.java

Example 13: testTaskID (verifies the string form of a map TaskID)

@Test(timeout = 5000)
public void testTaskID() throws IOException, InterruptedException {
  JobID jobid = new JobID("1014873536921", 6);
  // TaskType.MAP yields the "m" segment in the task ID's string form below.
  TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
  org.apache.hadoop.mapred.TaskID tid1 =
      org.apache.hadoop.mapred.TaskID.downgrade(tid);
  org.apache.hadoop.mapred.TaskReport treport =
      new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f,
        State.FAILED.toString(), null, TIPStatus.FAILED, 100, 100,
        new org.apache.hadoop.mapred.Counters());
  Assert
    .assertEquals(treport.getTaskId(), "task_1014873536921_0006_m_000000");
  Assert.assertEquals(treport.getTaskID().toString(),
    "task_1014873536921_0006_m_000000");
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestJobInfo.java

Example 14: processEventForJobSummary (builds the job summary from job history events)

public void processEventForJobSummary(HistoryEvent event, JobSummary summary, 
    JobId jobId) {
  // context.getJob could be used for some of this info as well.
  switch (event.getEventType()) {
  case JOB_SUBMITTED:
    JobSubmittedEvent jse = (JobSubmittedEvent) event;
    summary.setUser(jse.getUserName());
    summary.setQueue(jse.getJobQueueName());
    summary.setJobSubmitTime(jse.getSubmitTime());
    summary.setJobName(jse.getJobName());
    break;
  case NORMALIZED_RESOURCE:
    NormalizedResourceEvent normalizedResourceEvent = 
          (NormalizedResourceEvent) event;
    if (normalizedResourceEvent.getTaskType() == TaskType.MAP) {
      summary.setResourcesPerMap(normalizedResourceEvent.getMemory());
    } else if (normalizedResourceEvent.getTaskType() == TaskType.REDUCE) {
      summary.setResourcesPerReduce(normalizedResourceEvent.getMemory());
    }
    break;  
  case JOB_INITED:
    JobInitedEvent jie = (JobInitedEvent) event;
    summary.setJobLaunchTime(jie.getLaunchTime());
    break;
  case MAP_ATTEMPT_STARTED:
    TaskAttemptStartedEvent mtase = (TaskAttemptStartedEvent) event;
    if (summary.getFirstMapTaskLaunchTime() == 0)
      summary.setFirstMapTaskLaunchTime(mtase.getStartTime());
    break;
  case REDUCE_ATTEMPT_STARTED:
    TaskAttemptStartedEvent rtase = (TaskAttemptStartedEvent) event;
    if (summary.getFirstReduceTaskLaunchTime() == 0)
      summary.setFirstReduceTaskLaunchTime(rtase.getStartTime());
    break;
  case JOB_FINISHED:
    JobFinishedEvent jfe = (JobFinishedEvent) event;
    summary.setJobFinishTime(jfe.getFinishTime());
    summary.setNumFinishedMaps(jfe.getFinishedMaps());
    summary.setNumFailedMaps(jfe.getFailedMaps());
    summary.setNumFinishedReduces(jfe.getFinishedReduces());
    summary.setNumFailedReduces(jfe.getFailedReduces());
    if (summary.getJobStatus() == null)
      summary
          .setJobStatus(org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED
              .toString());
    // TODO JOB_FINISHED does not have state. Effectively job history does not
    // have state about the finished job.
    setSummarySlotSeconds(summary, jfe.getTotalCounters());
    break;
  case JOB_FAILED:
  case JOB_KILLED:
    JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event;
    summary.setJobStatus(juce.getStatus());
    summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps());
    summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces());
    summary.setJobFinishTime(juce.getFinishTime());
    setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
    break;
  default:
    break;
  }
}
 
Developer: naver, Project: hadoop, Lines: 62, Source: JobHistoryEventHandler.java

Example 15: testResourceUsageMatcherRunner (exercises LoadJob.ResourceUsageMatcherRunner with a fake progress reporter)

/**
 * Test {@link LoadJob.ResourceUsageMatcherRunner}.
 */
@Test
@SuppressWarnings("unchecked")
public void testResourceUsageMatcherRunner() throws Exception {
  Configuration conf = new Configuration();
  FakeProgressive progress = new FakeProgressive();
  
  // set the resource calculator plugin
  conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
                DummyResourceCalculatorPlugin.class, 
                ResourceCalculatorPlugin.class);
  // set the resources
  // set the resource implementation class
  conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS, 
                TestResourceUsageEmulatorPlugin.class, 
                ResourceUsageEmulatorPlugin.class);
  
  long currentTime = System.currentTimeMillis();
  
  // initialize the matcher class
  TaskAttemptID id = new TaskAttemptID("test", 1, TaskType.MAP, 1, 1);
  StatusReporter reporter = new DummyReporter(progress);
  TaskInputOutputContext context = 
    new MapContextImpl(conf, id, null, null, null, reporter, null);
  FakeResourceUsageMatcherRunner matcher = 
    new FakeResourceUsageMatcherRunner(context, null);
  
  // check if the matcher initialized the plugin
  String identifier = TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
  long initTime = 
    TestResourceUsageEmulatorPlugin.testInitialization(identifier, conf);
  assertTrue("ResourceUsageMatcherRunner failed to initialize the"
             + " configured plugin", initTime > currentTime);
  
  // check the progress
  assertEquals("Progress mismatch in ResourceUsageMatcherRunner", 
               0, progress.getProgress(), 0D);
  
  // call match() and check progress
  progress.setProgress(0.01f);
  currentTime = System.currentTimeMillis();
  matcher.test();
  long emulateTime = 
    TestResourceUsageEmulatorPlugin.testEmulation(identifier, conf);
  assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate"
             + " the configured plugin", emulateTime > currentTime);
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestResourceUsageEmulators.java


Note: The org.apache.hadoop.mapreduce.TaskType.MAP examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.