

Java TaskAttemptID Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.TaskAttemptID. If you have been wondering what TaskAttemptID is for, how to use it, or what real code that uses it looks like, the curated class examples below should help.


The TaskAttemptID class belongs to the org.apache.hadoop.mapred package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
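
Before going through the examples, here is a minimal, self-contained sketch of the core TaskAttemptID API that most of the snippets below rely on: constructing an attempt id, serializing it with toString(), parsing it back with forName(), and stripping the attempt number with getTaskID(). The job tracker identifier "202401011200" and the task/attempt numbers are made-up values used purely for illustration.

import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskAttemptIdDemo {
    public static void main(String[] args) {
        // Build an attempt id from: job tracker id, job number, task type, task number, attempt number.
        TaskAttemptID attempt = new TaskAttemptID("202401011200", 3, TaskType.MAP, 5, 0);

        // toString() produces the canonical form, e.g. attempt_202401011200_0003_m_000005_0.
        String serialized = attempt.toString();

        // forName() parses the canonical form back into a TaskAttemptID ...
        TaskAttemptID parsed = TaskAttemptID.forName(serialized);

        // ... and getTaskID() drops the attempt number, leaving the enclosing TaskID,
        // e.g. task_202401011200_0003_m_000005.
        TaskID taskId = parsed.getTaskID();

        System.out.println(serialized); // attempt_202401011200_0003_m_000005_0
        System.out.println(taskId);     // task_202401011200_0003_m_000005
    }
}

The same canonical string is what ends up in configuration properties such as mapred.task.id and mapreduce.task.attempt.id, which several of the examples below read or write.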

Example 1: getTaskID

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
public static TaskID getTaskID(Configuration cfg) {
    // first try with the attempt since some Hadoop versions mix the two
    String taskAttemptId = HadoopCfgUtils.getTaskAttemptId(cfg);
    if (StringUtils.hasText(taskAttemptId)) {
        try {
            return TaskAttemptID.forName(taskAttemptId).getTaskID();
        } catch (IllegalArgumentException ex) {
            // the task attempt is invalid (Tez in particular uses the wrong string - see #346)
            // try to fallback to task id
            return parseTaskIdFromTaskAttemptId(taskAttemptId);
        }
    }
    String taskIdProp = HadoopCfgUtils.getTaskId(cfg);
    // double-check task id bug in Hadoop 2.5.x
    if (StringUtils.hasText(taskIdProp) && !taskIdProp.contains("attempt")) {
        return TaskID.forName(taskIdProp);
    }
    return null;
}
 
Author: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 20, Source: HadoopCfgUtils.java

Example 2: getFetchableLocations

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
/**
 * Only get the locations that are fetchable (not already copied and not
 * made obsolete).
 *
 * @param copiedMapOutputs Synchronized set of already copied map outputs
 * @param obsoleteMapIdsSet Synchronized set of obsolete map ids
 * @return List of fetchable locations (could be empty)
 */
List<MapOutputLocation> getFetchableLocations(
    Set<TaskID> copiedMapOutputs,
    Set<TaskAttemptID> obsoleteMapIdsSet) {
  List<MapOutputLocation> fetchableLocations =
      new ArrayList<MapOutputLocation>(locations.size());
  for (MapOutputLocation location : locations) {
    // Check if we still need to copy the output from this location
    if (copiedMapOutputs.contains(location.getTaskId())) {
      location.errorType = CopyOutputErrorType.NO_ERROR;
      location.sizeRead = CopyResult.OBSOLETE;
      LOG.info("getFetchableLocations: Already " +
          "copied - " + location + ", will not try again");
    } else if (obsoleteMapIdsSet.contains(location.getTaskAttemptId())) {
      location.errorType = CopyOutputErrorType.NO_ERROR;
      location.sizeRead = CopyResult.OBSOLETE;
      LOG.info("getFetchableLocations: Obsolete - " + location + ", " +
          "will not try now.");
    } else {
      fetchableLocations.add(location);
    }
  }
  return fetchableLocations;
}
 
Author: rhli, Project: hadoop-EAR, Lines: 32, Source: ReduceTask.java

Example 3: get

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
@Override @Nonnull
public List<Processor> get(int count) {
    return processorList = range(0, count).mapToObj(i -> {
        try {
            String uuid = context.jetInstance().getCluster().getLocalMember().getUuid();
            TaskAttemptID taskAttemptID = new TaskAttemptID("jet-node-" + uuid, jobContext.getJobID().getId(),
                    JOB_SETUP, i, 0);
            jobConf.set("mapred.task.id", taskAttemptID.toString());
            jobConf.setInt("mapred.task.partition", i);

            TaskAttemptContextImpl taskAttemptContext = new TaskAttemptContextImpl(jobConf, taskAttemptID);
            @SuppressWarnings("unchecked")
            OutputFormat<K, V> outFormat = jobConf.getOutputFormat();
            RecordWriter<K, V> recordWriter = outFormat.getRecordWriter(
                    null, jobConf, uuid + '-' + valueOf(i), Reporter.NULL);
            return new WriteHdfsP<>(
                    recordWriter, taskAttemptContext, outputCommitter, extractKeyFn, extractValueFn);
        } catch (IOException e) {
            throw new JetException(e);
        }

    }).collect(toList());
}
 
Author: hazelcast, Project: hazelcast-jet, Lines: 24, Source: WriteHdfsP.java

Example 4: commitTask

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
@Override
public void commitTask(TaskAttemptContext context) 
	throws IOException 
{
	JobConf conf = context.getJobConf();
	TaskAttemptID attemptId = context.getTaskAttemptID();
	
	// get the mapping between index to output filename
	outputs = MRJobConfiguration.getOutputs(conf);
	
	// get temp task output path (compatible with hadoop1 and hadoop2)
	Path taskOutPath = FileOutputFormat.getWorkOutputPath(conf);
	FileSystem fs = taskOutPath.getFileSystem(conf);
	if( !fs.exists(taskOutPath) )
		throw new IOException("Task output path "+ taskOutPath.toString() + "does not exist.");
	
	// move the task outputs to their final places
	context.getProgressible().progress();
	moveFinalTaskOutputs(context, fs, taskOutPath);
	
	// delete the temporary task-specific output directory
	if( !fs.delete(taskOutPath, true) ) 
		LOG.debug("Failed to delete the temporary output directory of task: " + attemptId + " - " + taskOutPath);
}
 
Author: apache, Project: systemml, Lines: 25, Source: MultipleOutputCommitter.java

Example 5: moveFileToDestination

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
private void moveFileToDestination(TaskAttemptContext context, FileSystem fs, Path file) 
	throws IOException 
{
	TaskAttemptID attemptId = context.getTaskAttemptID();
	
	// get output index and final destination 
	String name =  file.getName(); //e.g., 0-r-00000 
	int index = Integer.parseInt(name.substring(0, name.indexOf("-")));
	Path dest = new Path(outputs[index], name); //e.g., outX/0-r-00000
	
	// move file from 'file' to 'finalPath'
	if( !fs.rename(file, dest) ) {
		if (!fs.delete(dest, true))
			throw new IOException("Failed to delete earlier output " + dest + " for rename of " + file + " in task " + attemptId);
		if (!fs.rename(file, dest)) 
			throw new IOException("Failed to save output " + dest + " for rename of " + file + " in task: " + attemptId);
	}
}
 
Author: apache, Project: systemml, Lines: 19, Source: MultipleOutputCommitter.java

Example 6: HadoopV1OutputCollector

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
/**
 * @param jobConf Job configuration.
 * @param taskCtx Task context.
 * @param directWrite Direct write flag.
 * @param fileName File name.
 * @param attempt Task attempt ID.
 * @throws IOException In case of IO exception.
 */
HadoopV1OutputCollector(JobConf jobConf, HadoopTaskContext taskCtx, boolean directWrite,
    @Nullable String fileName, TaskAttemptID attempt) throws IOException {
    this.jobConf = jobConf;
    this.taskCtx = taskCtx;
    this.attempt = attempt;

    if (directWrite) {
        jobConf.set("mapreduce.task.attempt.id", attempt.toString());

        OutputFormat outFormat = jobConf.getOutputFormat();

        writer = outFormat.getRecordWriter(null, jobConf, fileName, Reporter.NULL);
    }
    else
        writer = null;
}
 
Author: apache, Project: ignite, Lines: 24, Source: HadoopV1OutputCollector.java

Example 7: testCountStar

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
@Test
public void testCountStar() throws IOException, InterruptedException {
  Configuration config = new Configuration();
  TextInputFormat.TextRecordReader reader =
      new TextInputFormat.TextRecordReader();

  try {
    RecordServiceConfig.setInputQuery(config, "select count(*) from tpch.nation");
    List<InputSplit> splits = PlanUtil.getSplits(config, new Credentials()).splits;
    int numRows = 0;
    for (InputSplit split: splits) {
      reader.initialize(split,
          new TaskAttemptContextImpl(new JobConf(config), new TaskAttemptID()));
      while (reader.nextKeyValue()) {
        ++numRows;
      }
    }
    assertEquals(25, numRows);
  } finally {
    reader.close();
  }
}
 
Author: cloudera, Project: RecordServiceClient, Lines: 23, Source: MapReduceTest.java

Example 8: instantiateTaskAttemptContext

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
public static TaskAttemptContext instantiateTaskAttemptContext(JobConf jobConf,  TaskAttemptID taskAttemptID) throws Exception {
	try {
		// for Hadoop 1.xx
		Class<?> clazz = null;
		if(!TaskAttemptContext.class.isInterface()) { 
			clazz = Class.forName("org.apache.hadoop.mapred.TaskAttemptContext", true, Thread.currentThread().getContextClassLoader());
		}
		// for Hadoop 2.xx
		else {
			clazz = Class.forName("org.apache.hadoop.mapred.TaskAttemptContextImpl", true, Thread.currentThread().getContextClassLoader());
		}
		Constructor<?> constructor = clazz.getDeclaredConstructor(JobConf.class, TaskAttemptID.class);
		// for Hadoop 1.xx
		constructor.setAccessible(true);
		TaskAttemptContext context = (TaskAttemptContext) constructor.newInstance(jobConf, taskAttemptID);
		return context;
	} catch(Exception e) {
		throw new Exception("Could not create instance of TaskAttemptContext.", e);
	}
}
 
Author: citlab, Project: vs.msc.ws14, Lines: 21, Source: HadoopUtils.java

Example 9: commitTask

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
public void commitTask(JobConf conf, TaskAttemptID taskAttemptID)
	throws IOException {
	Path taskOutputPath = getTempTaskOutputPath(conf, taskAttemptID);
	if (taskOutputPath != null) {
		FileSystem fs = taskOutputPath.getFileSystem(conf);
		if (fs.exists(taskOutputPath)) {
			Path jobOutputPath = taskOutputPath.getParent().getParent();
			// Move the task outputs to their final place
			moveTaskOutputs(conf,taskAttemptID, fs, jobOutputPath, taskOutputPath);
			// Delete the temporary task-specific output directory
			if (!fs.delete(taskOutputPath, true)) {
				LOG.info("Failed to delete the temporary output" +
					" directory of task: " + taskAttemptID + " - " + taskOutputPath);
			}
			LOG.info("Saved output of task '" + taskAttemptID + "' to " +
				jobOutputPath);
		}
	}
}
 
Author: citlab, Project: vs.msc.ws14, Lines: 20, Source: HadoopFileOutputCommitter.java

Example 10: needsTaskCommit

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
public boolean needsTaskCommit(JobConf conf, TaskAttemptID taskAttemptID)
	throws IOException {
	try {
		Path taskOutputPath = getTempTaskOutputPath(conf, taskAttemptID);
		if (taskOutputPath != null) {
			// Get the file-system for the task output directory
			FileSystem fs = taskOutputPath.getFileSystem(conf);
			// since task output path is created on demand,
			// if it exists, task needs a commit
			if (fs.exists(taskOutputPath)) {
				return true;
			}
		}
	} catch (IOException  ioe) {
		throw ioe;
	}
	return false;
}
 
Author: citlab, Project: vs.msc.ws14, Lines: 19, Source: HadoopFileOutputCommitter.java

Example 11: getTempTaskOutputPath

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
public Path getTempTaskOutputPath(JobConf conf, TaskAttemptID taskAttemptID) {
	Path outputPath = FileOutputFormat.getOutputPath(conf);
	if (outputPath != null) {
		Path p = new Path(outputPath,
			(FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR +
				"_" + taskAttemptID.toString()));
		try {
			FileSystem fs = p.getFileSystem(conf);
			return p.makeQualified(fs);
		} catch (IOException ie) {
			LOG.warn(StringUtils.stringifyException(ie));
			return p;
		}
	}
	return null;
}
 
Author: citlab, Project: vs.msc.ws14, Lines: 17, Source: HadoopFileOutputCommitter.java

Example 12: initStdOut

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
/**
 * Clean up previous standard output and standard error logs.
 */
private void initStdOut(JobConf configuration) {
  TaskAttemptID taskId = TaskAttemptID.forName(configuration
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  File stdErr = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDERR);
  // prepare folder
  if (!stdOut.getParentFile().exists()) {
    stdOut.getParentFile().mkdirs();
  } else { // clean logs
    stdOut.deleteOnExit();
    stdErr.deleteOnExit();
  }
}
 
Author: naver, Project: hadoop, Lines: 18, Source: TestPipeApplication.java

Example 13: readStdOut

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
private String readStdOut(JobConf conf) throws Exception {
  TaskAttemptID taskId = TaskAttemptID.forName(conf
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);

  return readFile(stdOut);

}
 
Author: naver, Project: hadoop, Lines: 9, Source: TestPipeApplication.java

Example 14: testTipFailed

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
@SuppressWarnings("rawtypes")
@Test
public void testTipFailed() throws Exception {
  JobConf job = new JobConf();
  job.setNumMapTasks(2);

  TaskStatus status = new TaskStatus() {
    @Override
    public boolean getIsMap() {
      return false;
    }

    @Override
    public void addFetchFailedMap(TaskAttemptID mapTaskId) {
    }
  };
  Progress progress = new Progress();

  TaskAttemptID reduceId = new TaskAttemptID("314159", 0, TaskType.REDUCE,
      0, 0);
  ShuffleSchedulerImpl scheduler = new ShuffleSchedulerImpl(job, status,
      reduceId, null, progress, null, null, null);

  JobID jobId = new JobID();
  TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 1);
  scheduler.tipFailed(taskId1);

  Assert.assertEquals("Progress should be 0.5", 0.5f, progress.getProgress(),
      0.0f);
  Assert.assertFalse(scheduler.waitUntilDone(1));

  TaskID taskId0 = new TaskID(jobId, TaskType.REDUCE, 0);
  scheduler.tipFailed(taskId0);
  Assert.assertEquals("Progress should be 1.0", 1.0f, progress.getProgress(),
      0.0f);
  Assert.assertTrue(scheduler.waitUntilDone(1));
}
 
Author: naver, Project: hadoop, Lines: 38, Source: TestShuffleScheduler.java

Example 15: getMockedCompletionEventsUpdate

import org.apache.hadoop.mapred.TaskAttemptID; // import the required package/class
private MapTaskCompletionEventsUpdate getMockedCompletionEventsUpdate(
    int startIdx, int numEvents) {
  ArrayList<TaskCompletionEvent> tceList =
      new ArrayList<TaskCompletionEvent>(numEvents);
  for (int i = 0; i < numEvents; ++i) {
    int eventIdx = startIdx + i;
    TaskCompletionEvent tce = new TaskCompletionEvent(eventIdx,
        new TaskAttemptID("12345", 1, TaskType.MAP, eventIdx, 0),
        eventIdx, true, TaskCompletionEvent.Status.SUCCEEDED,
        "http://somehost:8888");
    tceList.add(tce);
  }
  TaskCompletionEvent[] events = {};
  return new MapTaskCompletionEventsUpdate(tceList.toArray(events), false);
}
 
Author: naver, Project: hadoop, Lines: 16, Source: TestEventFetcher.java


Note: The org.apache.hadoop.mapred.TaskAttemptID class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce this article without permission.