

Java TaskAttemptID Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.TaskAttemptID. If you have been wondering what TaskAttemptID is for and how to use it, the curated examples below should help.


The TaskAttemptID class belongs to the org.apache.hadoop.mapreduce package; it identifies a particular attempt at running a particular task. Below are 15 code examples of the class, sorted by popularity.
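
Before diving into the examples, here is a minimal, self-contained sketch of the class itself: constructing an attempt ID, printing its canonical string form, and parsing it back with forName. The tracker identifier and numbers used here are illustrative only.

import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskAttemptIDDemo {
  public static void main(String[] args) {
    // First attempt (0) of map task 5 in job 3 of a hypothetical tracker epoch.
    TaskAttemptID id = new TaskAttemptID("200707121733", 3, TaskType.MAP, 5, 0);
    System.out.println(id); // attempt_200707121733_0003_m_000005_0

    // Round-trip through the canonical string form.
    TaskAttemptID parsed = TaskAttemptID.forName(id.toString());
    System.out.println(parsed.getJobID());          // job_200707121733_0003
    System.out.println(parsed.getTaskID().getId()); // 5 (task number)
    System.out.println(parsed.getId());             // 0 (attempt number)
  }
}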

Example 1: setup

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
@Before
@SuppressWarnings("unchecked") // mocked generics
public void setup() {
  LOG.info(">>>> " + name.getMethodName());
  job = new JobConf();
  job.setBoolean(MRJobConfig.SHUFFLE_FETCH_RETRY_ENABLED, false);
  jobWithRetry = new JobConf();
  jobWithRetry.setBoolean(MRJobConfig.SHUFFLE_FETCH_RETRY_ENABLED, true);
  id = TaskAttemptID.forName("attempt_0_1_r_1_1");
  ss = mock(ShuffleSchedulerImpl.class);
  mm = mock(MergeManagerImpl.class);
  r = mock(Reporter.class);
  metrics = mock(ShuffleClientMetrics.class);
  except = mock(ExceptionReporter.class);
  key = JobTokenSecretManager.createSecretKey(new byte[]{0,0,0,0});
  connection = mock(HttpURLConnection.class);

  allErrs = mock(Counters.Counter.class);
  when(r.getCounter(anyString(), anyString())).thenReturn(allErrs);

  ArrayList<TaskAttemptID> maps = new ArrayList<TaskAttemptID>(1);
  maps.add(map1ID);
  maps.add(map2ID);
  when(ss.getMapsForHost(host)).thenReturn(maps);
}
 
Developer: naver | Project: hadoop | Lines: 26 | Source: TestFetcher.java

Example 2: reduce

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public void reduce(Text key, Iterable<IntWritable> values, 
                       Context context) throws IOException, InterruptedException {
  // Make one reducer slower for speculative execution
  TaskAttemptID taid = context.getTaskAttemptID();
  long sleepTime = 100;
  Configuration conf = context.getConfiguration();
  boolean test_speculate_reduce =
            conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);

  // IF TESTING REDUCE SPECULATIVE EXECUTION:
  //   Make the "*_r_000000_0" attempt take much longer than the others.
  //   When speculative execution is enabled, this should cause the attempt
  //   to be killed and restarted. At that point, the attempt ID will be
  //   "*_r_000000_1", so sleepTime will still remain 100ms.
  if ( (taid.getTaskType() == TaskType.REDUCE) && test_speculate_reduce
        && (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
    sleepTime = 10000;
  }
  try{
    Thread.sleep(sleepTime);
  } catch(InterruptedException ie) {
    // Ignore
  }
  context.write(key,new IntWritable(0));
}
 
Developer: naver | Project: hadoop | Lines: 26 | Source: TestSpeculativeExecution.java
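
The slow-down condition above keys off two numeric fields of the attempt ID. As a small illustration of how those fields map onto the canonical string form (the job identifier "1234" here is invented):

// First attempt of the first reduce task: matches the slow-down condition above.
TaskAttemptID first = TaskAttemptID.forName("attempt_1234_0001_r_000000_0");
assert first.getTaskType() == TaskType.REDUCE;
assert first.getTaskID().getId() == 0; // task number ("_000000_")
assert first.getId() == 0;             // attempt number (trailing "_0")

// The speculative relaunch is attempt "_1", so it only sleeps the default 100 ms.
TaskAttemptID relaunch = TaskAttemptID.forName("attempt_1234_0001_r_000000_1");
assert relaunch.getId() == 1;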

Example 3: setDatum

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public void setDatum(Object odatum) {
  this.datum =
      (TaskAttemptUnsuccessfulCompletion)odatum;
  this.attemptId =
      TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType =
      TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.status = datum.status.toString();
  this.error = datum.error.toString();
  this.counters =
      EventReader.fromAvro(datum.counters);
  this.clockSplits =
      AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages =
      AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages =
      AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes =
      AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes =
      AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
 
Developer: naver | Project: hadoop | Lines: 27 | Source: TaskAttemptUnsuccessfulCompletionEvent.java

Example 4: setDatum

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public void setDatum(Object odatum) {
  this.datum = (TaskFailed)odatum;
  this.id =
      TaskID.forName(datum.taskid.toString());
  this.taskType =
      TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.error = datum.error.toString();
  this.failedDueToAttempt =
      datum.failedDueToAttempt == null
      ? null
      : TaskAttemptID.forName(
          datum.failedDueToAttempt.toString());
  this.status = datum.status.toString();
  this.counters =
      EventReader.fromAvro(datum.counters);
}
 
Developer: naver | Project: hadoop | Lines: 18 | Source: TaskFailedEvent.java

Example 5: setDatum

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public void setDatum(Object oDatum) {
  this.datum = (MapAttemptFinished)oDatum;
  this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.taskStatus = datum.taskStatus.toString();
  this.mapFinishTime = datum.mapFinishTime;
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.state = datum.state.toString();
  this.counters = EventReader.fromAvro(datum.counters);
  this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages = AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
 
Developer: naver | Project: hadoop | Lines: 19 | Source: MapAttemptFinishedEvent.java

Example 6: maybeEmitEvent

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
    HistoryEventEmitter thatg) {
  if (taskAttemptIDName == null) {
    return null;
  }

  TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);

  String finishTime = line.get("FINISH_TIME");
  String status = line.get("TASK_STATUS");

  if (finishTime != null && status != null
      && status.equalsIgnoreCase("success")) {
    String hostName = line.get("HOSTNAME");
    String counters = line.get("COUNTERS");
    String state = line.get("STATE_STRING");
    String shuffleFinish = line.get("SHUFFLE_FINISHED");
    String sortFinish = line.get("SORT_FINISHED");

    if (shuffleFinish != null && sortFinish != null
        && "success".equalsIgnoreCase(status)) {
      ReduceAttempt20LineHistoryEventEmitter that =
          (ReduceAttempt20LineHistoryEventEmitter) thatg;

      return new ReduceAttemptFinishedEvent
        (taskAttemptID,
         that.originalTaskType, status,
         Long.parseLong(shuffleFinish),
         Long.parseLong(sortFinish),
         Long.parseLong(finishTime),
         hostName, -1, null,
         state, maybeParseCounters(counters),
         null);
    }
  }

  return null;
}
 
Developer: naver | Project: hadoop | Lines: 39 | Source: ReduceAttempt20LineHistoryEventEmitter.java

Example 7: maskAttemptID

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
/**
 * Mask the job ID part in a {@link TaskAttemptID}.
 * 
 * @param attemptId
 *          raw {@link TaskAttemptID} read from trace
 * @return masked {@link TaskAttemptID} with empty {@link JobID}.
 */
private TaskAttemptID maskAttemptID(TaskAttemptID attemptId) {
  JobID jobId = new JobID();
  TaskType taskType = attemptId.getTaskType();
  TaskID taskId = attemptId.getTaskID();
  return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(), taskType,
      taskId.getId(), attemptId.getId());
}
 
Developer: naver | Project: hadoop | Lines: 15 | Source: ZombieJob.java
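
A hypothetical usage sketch (the raw attempt ID below is invented): masking replaces the job-specific prefix with an empty JobID, so attempt IDs from different trace jobs become directly comparable, while task type, task number, and attempt number are preserved.

TaskAttemptID raw = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
TaskAttemptID masked = maskAttemptID(raw);
// masked.getJobID() now equals new JobID(); getTaskType() (MAP),
// getTaskID().getId() (5) and getId() (0) are unchanged.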

Example 8: getLocalLogDir

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public static String getLocalLogDir(TaskAttemptID attemptId) {
    int tid = attemptId.getTaskID().getId();
    int aid = attemptId.getId();
    String jid = attemptId.getJobID().toString();
    StringBuilder sb = new StringBuilder(jid).append('-');
    sb.append(tid).append('-').append(aid);
    String localLogDir = sb.toString();
    return localLogDir;
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 10 | Source: DirectNetezzaManager.java
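
Following the string-building logic above, a quick sanity check (the attempt ID is illustrative):

TaskAttemptID id = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
String dir = getLocalLogDir(id);
// dir == "job_200707121733_0003-5-0"  (job ID, task number, attempt number)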

Example 9: getTaskDiagnostics

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0)
    throws IOException, InterruptedException {

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter
      .toYarn(arg0);
  GetDiagnosticsRequest request = recordFactory
      .newRecordInstance(GetDiagnosticsRequest.class);
  request.setTaskAttemptId(attemptID);
  List<String> list = ((GetDiagnosticsResponse) invoke("getDiagnostics",
      GetDiagnosticsRequest.class, request)).getDiagnosticsList();
  String[] result = new String[list.size()];
  int i = 0;
  for (String c : list) {
    result[i++] = c.toString();
  }
  return result;
}
 
Developer: naver | Project: hadoop | Lines: 18 | Source: ClientServiceDelegate.java

Example 10: maybeEmitEvent

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
    HistoryEventEmitter thatg) {
  if (taskAttemptIDName == null) {
    return null;
  }

  TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);

  String startTime = line.get("START_TIME");
  String taskType = line.get("TASK_TYPE");
  String trackerName = line.get("TRACKER_NAME");
  String httpPort = line.get("HTTP_PORT");
  String locality = line.get("LOCALITY");
  if (locality == null) {
    locality = "";
  }
  String avataar = line.get("AVATAAR");
  if (avataar == null) {
    avataar = "";
  }

  if (startTime != null && taskType != null) {
    TaskAttempt20LineEventEmitter that =
        (TaskAttempt20LineEventEmitter) thatg;

    that.originalStartTime = Long.parseLong(startTime);
    that.originalTaskType =
        Version20LogInterfaceUtils.get20TaskType(taskType);

    int port =
        httpPort.equals("") ? DEFAULT_HTTP_PORT : Integer
            .parseInt(httpPort);

    return new TaskAttemptStartedEvent(taskAttemptID,
        that.originalTaskType, that.originalStartTime, trackerName, port, -1,
        locality, avataar);
  }

  return null;
}
 
Developer: naver | Project: hadoop | Lines: 41 | Source: TaskAttempt20LineEventEmitter.java

Example 11: testReinit

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
@Test
public void testReinit() throws Exception {
  // Test that a split containing multiple files works correctly,
  // with the child RecordReader getting its initialize() method
  // called a second time.
  TaskAttemptID taskId = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
  Configuration conf = new Configuration();
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, taskId);

  // This will create a CombineFileRecordReader that itself contains a
  // DummyRecordReader.
  InputFormat inputFormat = new ChildRRInputFormat();

  Path [] files = { new Path("file1"), new Path("file2") };
  long [] lengths = { 1, 1 };

  CombineFileSplit split = new CombineFileSplit(files, lengths);
  RecordReader rr = inputFormat.createRecordReader(split, context);
  assertTrue("Unexpected RR type!", rr instanceof CombineFileRecordReader);

  // first initialize() call comes from MapTask. We'll do it here.
  rr.initialize(split, context);

  // First value is first filename.
  assertTrue(rr.nextKeyValue());
  assertEquals("file1", rr.getCurrentValue().toString());

  // The inner RR will return false, because it only emits one (k, v) pair.
  // But there's another sub-split to process. This returns true to us.
  assertTrue(rr.nextKeyValue());
  
  // And the 2nd rr will have its initialize method called correctly.
  assertEquals("file2", rr.getCurrentValue().toString());
  
  // But after both child RR's have returned their singleton (k, v), this
  // should also return false.
  assertFalse(rr.nextKeyValue());
}
 
Developer: naver | Project: hadoop | Lines: 39 | Source: TestCombineFileInputFormat.java

Example 12: getMapOutputURL

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
/**
 * Create the map-output URL. It contains all the map attempt IDs,
 * separated by commas.
 * @param host the map host to fetch from
 * @param maps the map attempt IDs whose output should be fetched
 * @return the URL to fetch the map outputs from
 * @throws MalformedURLException if the constructed URL is invalid
 */
private URL getMapOutputURL(MapHost host, Collection<TaskAttemptID> maps
                            )  throws MalformedURLException {
  // Get the base url
  StringBuffer url = new StringBuffer(host.getBaseUrl());
  
  boolean first = true;
  for (TaskAttemptID mapId : maps) {
    if (!first) {
      url.append(",");
    }
    url.append(mapId);
    first = false;
  }
 
  LOG.debug("MapOutput URL for " + host + " -> " + url.toString());
  return new URL(url.toString());
}
 
Developer: naver | Project: hadoop | Lines: 26 | Source: Fetcher.java
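
As a hedged illustration of the resulting URL (the host, port, and job ID below are invented, and getMapOutputURL is private to Fetcher in practice): the shuffle base URL returned by MapHost.getBaseUrl() typically ends in a map= query parameter, so appending the comma-separated attempt IDs completes the fetch URL.

// Hypothetical invocation, assuming access to the private method.
MapHost host = new MapHost("node1:13562",
    "http://node1:13562/mapOutput?job=job_1234_0001&reduce=0&map=");
List<TaskAttemptID> maps = Arrays.asList(
    TaskAttemptID.forName("attempt_1234_0001_m_000001_0"),
    TaskAttemptID.forName("attempt_1234_0001_m_000002_0"));
URL url = getMapOutputURL(host, maps);
// -> ...&map=attempt_1234_0001_m_000001_0,attempt_1234_0001_m_000002_0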

Example 13: getRecordWriter

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
/** {@inheritDoc} */
public RecordWriter<K, V> getRecordWriter(FileSystem filesystem,
    JobConf job, String name, Progressable progress) throws IOException {
  org.apache.hadoop.mapreduce.RecordWriter<K, V> w = super.getRecordWriter(
    new TaskAttemptContextImpl(job, 
          TaskAttemptID.forName(job.get(MRJobConfig.TASK_ATTEMPT_ID))));
  org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter writer = 
   (org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter) w;
  try {
    return new DBRecordWriter(writer.getConnection(), writer.getStatement());
  } catch(SQLException se) {
    throw new IOException(se);
  }
}
 
Developer: naver | Project: hadoop | Lines: 15 | Source: DBOutputFormat.java

Example 14: FakeFetcher

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public FakeFetcher(JobConf job, TaskAttemptID reduceId,
    ShuffleSchedulerImpl<K,V> scheduler, MergeManagerImpl<K,V> merger,
    Reporter reporter, ShuffleClientMetrics metrics,
    ExceptionReporter exceptionReporter, SecretKey jobTokenSecret,
    HttpURLConnection connection) {
  super(job, reduceId, scheduler, merger, reporter, metrics,
      exceptionReporter, jobTokenSecret);
  this.connection = connection;
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: TestFetcher.java

Example 15: addKnownMapOutput

import org.apache.hadoop.mapreduce.TaskAttemptID; // import the required package/class
public synchronized void addKnownMapOutput(String hostName,
                                           String hostUrl,
                                           TaskAttemptID mapId) {
  MapHost host = mapLocations.get(hostName);
  if (host == null) {
    host = new MapHost(hostName, hostUrl);
    mapLocations.put(hostName, host);
  }
  host.addKnownMap(mapId);

  // Mark the host as pending
  if (host.getState() == State.PENDING) {
    pendingHosts.add(host);
    notifyAll();
  }
}
 
Developer: naver | Project: hadoop | Lines: 17 | Source: ShuffleSchedulerImpl.java


Note: The org.apache.hadoop.mapreduce.TaskAttemptID class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before using or redistributing the code; do not republish without permission.