本文整理汇总了Java中org.apache.hadoop.mapred.TaskID类的典型用法代码示例。如果您正苦于以下问题:Java TaskID类的具体用法?Java TaskID怎么用?Java TaskID使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
TaskID类属于org.apache.hadoop.mapred包,在下文中一共展示了TaskID类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getTaskID
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Resolves the {@link TaskID} of the currently running task from the given
 * Hadoop configuration, trying the task-attempt property first and falling
 * back to the plain task-id property.
 *
 * @param cfg the Hadoop configuration to inspect
 * @return the resolved task id, or {@code null} if none could be determined
 */
public static TaskID getTaskID(Configuration cfg) {
// prefer the attempt id: some Hadoop versions mix the attempt and task properties
String attemptId = HadoopCfgUtils.getTaskAttemptId(cfg);
if (StringUtils.hasText(attemptId)) {
try {
return TaskAttemptID.forName(attemptId).getTaskID();
} catch (IllegalArgumentException ex) {
// the attempt id is malformed (Tez in particular uses the wrong string - see #346);
// try a lenient manual parse instead
return parseTaskIdFromTaskAttemptId(attemptId);
}
}
// fall back to the task id, guarding against the Hadoop 2.5.x bug where the
// task-id property actually contains an attempt id
String taskIdProp = HadoopCfgUtils.getTaskId(cfg);
if (StringUtils.hasText(taskIdProp) && !taskIdProp.contains("attempt")) {
return TaskID.forName(taskIdProp);
}
return null;
}
示例2: parseTaskIdFromTaskAttemptId
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Lenient fallback parser for task-attempt strings that
 * {@code TaskAttemptID.forName} rejects. Tez in particular emits
 * {@code task1244XXX} instead of {@code task_1244...}; this strips the known
 * prefixes and parses the underscore-separated tokens directly.
 *
 * @param taskAttemptId the raw (possibly malformed) attempt id
 * @return the parsed {@link TaskID}, or {@code null} if the string cannot be parsed
 */
private static TaskID parseTaskIdFromTaskAttemptId(String taskAttemptId) {
String id = taskAttemptId;
if (id.startsWith("task")) {
id = id.substring(4);
}
if (id.startsWith("_")) {
id = id.substring(1);
}
// need at least jtIdentifier, jobId, type and taskId — e.g. 123123123123_0001_r_0000_4
List<String> parts = StringUtils.tokenize(id, "_");
if (parts.size() < 4) {
LogFactory.getLog(HadoopCfgUtils.class).warn("Cannot parse task attempt (too little arguments) " + id);
return null;
}
// parse eagerly; any new/unexpected id format surfaces as an exception caught below
try {
boolean isMap = parts.get(2).startsWith("m");
return new TaskID(parts.get(0), Integer.parseInt(parts.get(1)), isMap, Integer.parseInt(parts.get(3)));
} catch (Exception ex) {
LogFactory.getLog(HadoopCfgUtils.class).warn("Cannot parse task attempt " + id);
return null;
}
}
示例3: isTaskStopped
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Checks whether the task with the given id, as managed by a task tracker,
 * has stopped. While the task is still RUNNING or UNASSIGNED, the state is
 * polled once per second for up to 60 seconds.
 * @param tID id of the task whose status is checked; may be {@code null}.
 * @throws IOException if there is an error querying the task status.
 * @return {@code true} if the task has stopped (or was not found),
 *         {@code false} if it was still running when the polling window elapsed.
 */
public boolean isTaskStopped(TaskID tID) throws IOException {
  final int maxPolls = 60; // one-second polls, i.e. up to a minute of waiting
  int counter = 0;
  if (tID != null && proxy.getTask(tID) != null) {
    TaskStatus.State tState = proxy.getTask(tID).getTaskStatus().getRunState();
    while (counter < maxPolls) {
      if (tState != TaskStatus.State.RUNNING &&
          tState != TaskStatus.State.UNASSIGNED) {
        break;
      }
      UtilsForTests.waitFor(1000);
      tState = proxy.getTask(tID).getTaskStatus().getRunState();
      counter++;
    }
  }
  // counter only reaches maxPolls when the task never left RUNNING/UNASSIGNED
  return counter != maxPolls;
}
示例4: signalAllTasks
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Allows the job to continue through the MR control-job mechanism by sending
 * a {@link FinishTaskControlAction} for each of the job's tasks to every
 * task tracker.
 * @param id id of the job whose tasks should be signalled.
 * @throws IOException when failed to get task info.
 */
public void signalAllTasks(JobID id) throws IOException {
  TaskInfo[] taskInfos = getJTClient().getProxy().getTaskInfo(id);
  if (taskInfos == null) {
    return;
  }
  // fetch the task tracker clients once, not once per task (loop-invariant)
  Collection<TTClient> tts = getTTClients();
  for (TaskInfo taskInfoRemaining : taskInfos) {
    if (taskInfoRemaining != null) {
      FinishTaskControlAction action = new FinishTaskControlAction(TaskID
          .downgrade(taskInfoRemaining.getTaskID()));
      for (TTClient cli : tts) {
        cli.getProxy().sendAction(action);
      }
    }
  }
}
示例5: TaskReport
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Creates a new TaskReport object describing the state of a single task.
 * @param taskid id of the task being reported on
 * @param progress completion progress of the task, in the range [0, 1]
 * @param state human-readable description of the task state
 * @param diagnostics diagnostic messages collected for the task
 * @param currentStatus current TIP status of the task
 * @param startTime time at which the task started, in milliseconds
 * @param finishTime time at which the task finished, in milliseconds
 * @param counters counters accumulated by the task
 */
public TaskReport(TaskID taskid, float progress, String state,
String[] diagnostics, TIPStatus currentStatus,
long startTime, long finishTime,
Counters counters) {
this.taskid = taskid;
this.progress = progress;
this.state = state;
this.diagnostics = diagnostics;
this.currentStatus = currentStatus;
this.startTime = startTime;
this.finishTime = finishTime;
this.counters = counters;
}
示例6: validateTaskStderr
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
// Reads the STDERR task log of attempt 0 of task 0 (of the given type) for the
// job and asserts it matches the expected stderr content exactly.
void validateTaskStderr(StreamJob job, TaskType type)
throws IOException {
TaskAttemptID attemptId =
new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);
String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
attemptId, false);
// trim() is called on expectedStderr here because the method
// MapReduceTestUtil.readTaskLog() returns trimmed String.
assertTrue(log.equals(expectedStderr.trim()));
}
示例7: getInputFileForWrite
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Create a local reduce input file name.
 *
 * @param mapId a map task id
 * @param size the size of the file
 * @param conf configuration used by the local directory allocator
 * @return a writable local path for the reduce input produced by the given map
 * @throws IOException if no local directory with enough space is available
 */
public Path getInputFileForWrite(TaskID mapId, long size, Configuration conf)
throws IOException {
return lDirAlloc.getLocalPathForWrite(
String.format(REDUCE_INPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, mapId.getId()), size,
conf);
}
示例8: getJobIdFromTaskId
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Extracts the job id portion of the given task id string.
 * Contains a workaround for Tez, which emits ids of the form
 * {@code taskXXX_...} instead of {@code task_XXX_...}.
 *
 * @param taskidStr the task id string to parse
 * @return the string form of the job id the task belongs to
 */
@Nonnull
public static String getJobIdFromTaskId(@Nonnull String taskidStr) {
String normalized = taskidStr;
if (!normalized.startsWith("task_")) {// workaround for Tez
normalized = normalized.replace("task", "task_");
normalized = normalized.substring(0, normalized.lastIndexOf('_'));
}
TaskID taskId = TaskID.forName(normalized);
return taskId.getJobID().toString();
}
示例9: HeartBeat
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Sets up a heart-beat reporter for the given progressable: the delay is
 * derived from the configured Hadoop task timeout minus the lead time, so
 * progress is reported just before the task would time out. The id of the
 * current task is resolved from the configuration for logging purposes.
 */
HeartBeat(final Progressable progressable, Configuration cfg, TimeValue lead, final Log log) {
Assert.notNull(progressable, "a valid progressable is required to report status to Hadoop");
TimeValue timeout = HadoopCfgUtils.getTaskTimeout(cfg);
Assert.isTrue(timeout.getSeconds() <= 0 || timeout.getSeconds() > lead.getSeconds(), "Hadoop timeout is shorter than the heartbeat");
this.progressable = progressable;
this.log = log;
// clamp a disabled/negative timeout to zero before computing the delay
long timeoutMillis = Math.max(timeout.getMillis(), 0);
// the task is simple hence the delay = timeout - lead, that is when to start the notification right before the timeout
this.delay = new TimeValue(Math.abs(timeoutMillis - lead.getMillis()), TimeUnit.MILLISECONDS);
TaskID taskID = HadoopCfgUtils.getTaskID(cfg);
String taskId;
if (taskID != null) {
taskId = "" + taskID;
}
else {
log.warn("Cannot determine task id...");
taskId = "<unknown>";
if (log.isTraceEnabled()) {
log.trace("Current configuration is " + HadoopCfgUtils.asProperties(cfg));
}
}
id = taskId;
}
示例10: detectCurrentInstance
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
/**
 * Determines the numeric id of the currently executing task instance from
 * the configuration.
 * @param conf the Hadoop configuration to inspect
 * @return the task's numeric id, or {@code NO_TASK_ID} when it cannot be determined
 */
private int detectCurrentInstance(Configuration conf) {
TaskID taskID = HadoopCfgUtils.getTaskID(conf);
if (taskID == null) {
// String.format served no purpose here (no format arguments) - log the literal directly
log.warn("Cannot determine task id - redirecting writes in a random fashion");
return NO_TASK_ID;
}
return taskID.getId();
}
示例11: setup
import org.apache.hadoop.mapred.TaskID; //导入依赖的package包/类
@Override
protected void setup(Context context) throws IOException, InterruptedException {
this.context = context;
// handle a bug in MRUnit - should be fixed in MRUnit 1.0.0
// stub getTaskAttemptID() so the reducer under test always sees a valid attempt id
when(context.getTaskAttemptID()).thenAnswer(new Answer<TaskAttemptID>() {
@Override
public TaskAttemptID answer(final InvocationOnMock invocation) {
// FIXME MRUNIT seems to pass taskid to the reduce task as mapred.TaskID rather than mapreduce.TaskID
return new TaskAttemptID(new TaskID("000000000000", 0, true, 0), 0);
}
});
super.setup(context);
}