本文整理汇总了Java中org.apache.hadoop.mapreduce.v2.api.records.TaskType类的典型用法代码示例。如果您正苦于以下问题:Java TaskType类的具体用法?Java TaskType怎么用?Java TaskType使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
TaskType类属于org.apache.hadoop.mapreduce.v2.api.records包,在下文中一共展示了TaskType类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: containerNeed
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Returns the per-job counter of outstanding container needs for the
 * attempt's task type (map vs. reduce), creating the counter on first
 * access.
 *
 * @param taskID the task whose job id and type select the counter
 * @return the (possibly freshly created) counter; never null
 */
private AtomicInteger containerNeed(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  TaskType taskType = taskID.getTaskType();
  ConcurrentMap<JobId, AtomicInteger> relevantMap
      = taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
  // computeIfAbsent replaces the get / putIfAbsent / get sequence with a
  // single atomic call and avoids the redundant second lookup.
  return relevantMap.computeIfAbsent(jobID, new java.util.function.Function<JobId, AtomicInteger>() {
    @Override
    public AtomicInteger apply(JobId id) {
      return new AtomicInteger(0);
    }
  });
}
示例2: ReduceTaskAttemptInfo
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Reduce-specific attempt info: derives shuffle/merge/reduce phase
 * durations from the attempt's recorded timestamps.
 *
 * @param ta   the reduce task attempt to report on
 * @param type the task type, forwarded to the base class
 */
public ReduceTaskAttemptInfo(TaskAttempt ta, TaskType type) {
  super(ta, type, false);
  this.shuffleFinishTime = ta.getShuffleFinishTime();
  this.mergeFinishTime = ta.getSortFinishTime();
  // Times.elapsed returns -1 for an interval that has not completed;
  // report such intervals as 0. The clamp was duplicated three times in
  // the original — extracted to a single helper.
  this.elapsedShuffleTime =
      zeroIfUnset(Times.elapsed(this.startTime, this.shuffleFinishTime, false));
  this.elapsedMergeTime =
      zeroIfUnset(Times.elapsed(this.shuffleFinishTime, this.mergeFinishTime, false));
  this.elapsedReduceTime =
      zeroIfUnset(Times.elapsed(this.mergeFinishTime, this.finishTime, false));
}

/** Maps the -1 "not yet available" sentinel from Times.elapsed to 0. */
private static long zeroIfUnset(long elapsed) {
  return elapsed == -1 ? 0 : elapsed;
}
示例3: TaskInfo
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Builds the REST/web view of a single task from its latest report:
 * type, timing, state, progress, and the successful attempt id (empty
 * string when no attempt has succeeded).
 */
public TaskInfo(Task task) {
  TaskType taskType = task.getType();
  this.type = taskType.toString();
  TaskReport taskReport = task.getReport();
  this.startTime = taskReport.getStartTime();
  this.finishTime = taskReport.getFinishTime();
  this.state = taskReport.getTaskState();
  // A running task has no finish time yet; Times.elapsed handles that,
  // returning -1 when the interval cannot be computed.
  boolean stillRunning = (this.state == TaskState.RUNNING);
  this.elapsedTime =
      Times.elapsed(this.startTime, this.finishTime, stillRunning);
  if (this.elapsedTime == -1) {
    this.elapsedTime = 0;
  }
  this.progress = taskReport.getProgress() * 100;
  this.status = taskReport.getStatus();
  this.id = MRApps.toString(task.getID());
  this.taskNum = task.getID().getId();
  this.successful = getSuccessfulAttempt(task);
  this.successfulAttempt =
      (this.successful == null) ? "" : MRApps.toString(this.successful.getID());
}
示例4: getJobTasks
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * REST endpoint: lists a job's tasks, optionally filtered by task type
 * ("m" for map, "r" for reduce).
 *
 * @param hsr  request, used for the job ACL check
 * @param jid  job id path parameter
 * @param type optional task-type filter query parameter
 * @return all tasks of the job (or only those matching the filter)
 * @throws BadRequestException when the type filter is not a valid task type
 */
@GET
@Path("/jobs/{jobid}/tasks")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @QueryParam("type") String type) {
  init();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  // Parse the type filter once, before the loop. The original re-parsed
  // the (loop-invariant) query parameter for every task of the job.
  TaskType ttype = null;
  if (type != null && !type.isEmpty()) {
    try {
      ttype = MRApps.taskType(type);
    } catch (YarnRuntimeException e) {
      throw new BadRequestException("tasktype must be either m or r");
    }
  }
  TasksInfo allTasks = new TasksInfo();
  for (Task task : job.getTasks().values()) {
    if (ttype != null && task.getType() != ttype) {
      continue;
    }
    allTasks.add(new TaskInfo(task));
  }
  return allTasks;
}
示例5: getJobTaskAttempts
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * REST endpoint: lists all attempts of a given task. Reduce attempts are
 * rendered with their extra shuffle/merge timing fields.
 */
@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
  init();
  TaskAttemptsInfo attempts = new TaskAttemptsInfo();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  Task task = getTaskFromTaskIdString(tid, job);
  TaskType taskType = task.getType();
  boolean isReduce = (taskType == TaskType.REDUCE);
  for (TaskAttempt attempt : task.getAttempts().values()) {
    if (attempt == null) {
      continue;
    }
    attempts.add(isReduce
        ? new ReduceTaskAttemptInfo(attempt, taskType)
        : new TaskAttemptInfo(attempt, taskType, true));
  }
  return attempts;
}
示例6: getTaskAttempts
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Collects the attempts of the requested task type whose state matches
 * the requested attempt-state filter (both read from page parameters).
 */
@Override
protected Collection<TaskAttempt> getTaskAttempts() {
  List<TaskAttempt> matching = new ArrayList<TaskAttempt>();
  TaskType requestedType = MRApps.taskType($(TASK_TYPE));
  TaskAttemptStateUI requestedState =
      MRApps.taskAttemptState($(ATTEMPT_STATE));
  Job job = app.getJob();
  for (Task task : job.getTasks(requestedType).values()) {
    for (TaskAttempt attempt : task.getAttempts().values()) {
      if (requestedState.correspondsTo(attempt.getState())) {
        matching.add(attempt);
      }
    }
  }
  return matching;
}
示例7: createReq
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Test helper: builds a container-request event for a synthetic task
 * attempt of the given type, either a fresh request with locality hints
 * or a retry request for an earlier failed attempt.
 */
private ContainerRequestEvent
    createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
    boolean earlierFailedAttempt, boolean reduce) {
  TaskType taskType = reduce ? TaskType.REDUCE : TaskType.MAP;
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, taskType);
  TaskAttemptId attemptId =
      MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
  Resource containerNeed = Resource.newInstance(memory, 1);
  if (earlierFailedAttempt) {
    // Requests for previously failed attempts carry no locality hints.
    return ContainerRequestEvent
        .createContainerRequestEventForFailedContainer(attemptId,
            containerNeed);
  }
  return new ContainerRequestEvent(attemptId, containerNeed, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK });
}
示例8: sendLaunchedEvents
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Emits the bookkeeping events for a launched attempt: a job counter
 * update (total launched maps or reduces) followed by a
 * TaskAttemptStartedEvent for the job history.
 */
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);
  // Fixed unbalanced brackets in the log message: the original printed
  // "containerId: [<id> on NM: [<node>]" with no "]" after the id.
  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + "] on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
      new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
          TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
          launchTime, trackerName, httpPort, shufflePort, container.getId(),
          locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
示例9: transition
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * State-machine transition for an attempt that accumulated too many
 * fetch failures: fails the attempt, writes a job-history event when a
 * start event was previously generated, and notifies the owning task.
 */
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // Too many fetch failures can only happen for map tasks (it is the
  // fetching of this attempt's map output that failed repeatedly).
  Preconditions
      .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
  // Surface the failure reason in the attempt's diagnostics.
  taskAttempt.addDiagnosticInfo("Too Many fetch failures.Failing the attempt");
  if (taskAttempt.getLaunchTime() != 0) {
    // The attempt actually launched: bump the failed-attempt counters
    // and record an unsuccessful-completion event in the job history.
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
  } else {
    // No start event was ever emitted, so a finish event would be
    // unmatched in the history file.
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  // Finally tell the owning task that this attempt has failed.
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
示例10: computeProgress
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Recomputes the aggregate map and reduce progress across all tasks of
 * the job, under the read lock. A finished task counts as 1.0 regardless
 * of its reported progress.
 */
private void computeProgress() {
  this.readLock.lock();
  try {
    float mapTotal = 0f;
    float reduceTotal = 0f;
    for (Task t : this.tasks.values()) {
      float p = t.isFinished() ? 1f : t.getProgress();
      if (t.getType() == TaskType.MAP) {
        mapTotal += p;
      } else {
        reduceTotal += p;
      }
    }
    // Normalize by the task counts, guarding against division by zero
    // for jobs that have no tasks of one type.
    this.mapProgress =
        (this.numMapTasks == 0) ? mapTotal : mapTotal / this.numMapTasks;
    this.reduceProgress =
        (this.numReduceTasks == 0) ? reduceTotal
            : reduceTotal / this.numReduceTasks;
  } finally {
    this.readLock.unlock();
  }
}
示例11: getTasks
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Returns a snapshot map of the job's tasks of the given type
 * (everything that is not MAP is treated as reduce).
 */
@Override
public Map<TaskId,Task> getTasks(TaskType taskType) {
  Map<TaskId, Task> allTasks = tasks;
  Map<TaskId, Task> snapshot = new HashMap<TaskId, Task>();
  readLock.lock();
  try {
    Set<TaskId> idsOfType =
        (TaskType.MAP == taskType) ? mapTasks : reduceTasks;
    for (TaskId id : idsOfType) {
      snapshot.put(id, allTasks.get(id));
    }
    return snapshot;
  } finally {
    readLock.unlock();
  }
}
示例12: getTaskReports
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Serves a GetTaskReports RPC: returns the reports of every task of the
 * requested type for the requested job.
 *
 * @param request carries the job id and task type to report on
 * @return a response populated with one report per task
 * @throws IOException if the job lookup/verification fails
 */
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();
  GetTaskReportsResponse response =
      recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  // verifyAndGetJob also enforces the VIEW_JOB ACL for the caller.
  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + " " + jobId
      + ". Report-size will be " + tasks.size());
  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }
  return response;
}
示例13: remove
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Removes the container assignment for the given attempt, including the
 * reverse container-to-attempt mapping, and logs a successful reduce
 * preemption when the attempt was waiting on one.
 *
 * @param tId the attempt whose assignment should be removed
 * @return true if a container mapping existed and was removed
 */
boolean remove(TaskAttemptId tId) {
  ContainerId containerId = null;
  if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
    // NOTE(review): maps.remove(tId) is dereferenced unconditionally;
    // if tId was never assigned a container this throws NPE — confirm
    // callers only pass assigned attempts.
    containerId = maps.remove(tId).getId();
  } else {
    // NOTE(review): same unconditional dereference as the map branch,
    // despite the null check on containerId just below — verify.
    containerId = reduces.remove(tId).getId();
    if (containerId != null) {
      boolean preempted = preemptionWaitingReduces.remove(tId);
      if (preempted) {
        LOG.info("Reduce preemption successful " + tId);
      }
    }
  }
  if (containerId != null) {
    // Drop the reverse mapping so the container is no longer attributed
    // to this attempt.
    containerToAttemptMap.remove(containerId);
    return true;
  }
  return false;
}
示例14: getReduceProgress
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Estimates this reduce attempt's progress. While maps are still
 * running, progress is the fraction of finished maps scaled into
 * [0, 0.5]. Once all maps are done, the second half ramps up with
 * wall-clock time relative to the estimated code runtime, capped at 1.0.
 */
private float getReduceProgress() {
  Job job = myAppContext.getJob(myAttemptID.getTaskId().getJobId());
  float runtime = getCodeRuntime();
  Collection<Task> allMapTasks = job.getTasks(TaskType.MAP).values();
  int numberMaps = allMapTasks.size();
  int numberDoneMaps = 0;
  for (Task mapTask : allMapTasks) {
    if (mapTask.isFinished()) {
      ++numberDoneMaps;
    }
  }
  if (numberMaps == numberDoneMaps) {
    // Keep the earliest time at which all maps were observed finished;
    // Math.min makes the first observation sticky across calls.
    shuffleCompletedTime = Math.min(shuffleCompletedTime, clock.getTime());
    // Linear ramp from 0.5 to 1.0 over (runtime * 2000) time units after
    // shuffle completion. NOTE(review): the 2000.0F scale factor
    // presumably converts the runtime estimate to the clock's units —
    // confirm against getCodeRuntime()'s contract.
    return Math.min
        ((float) (clock.getTime() - shuffleCompletedTime)
            / (runtime * 2000.0F) + 0.5F,
        1.0F);
  } else {
    // Maps still running: done-fraction scaled to the first half.
    return ((float) numberDoneMaps) / numberMaps * 0.5F;
  }
}
示例15: verifyAMTaskAttempt
import org.apache.hadoop.mapreduce.v2.api.records.TaskType; //导入依赖的package包/类
/**
 * Test helper: verifies the JSON rendering of a task attempt against the
 * live attempt object. Reduce attempts expose five extra timing fields
 * (17 elements vs. 12) which are verified separately.
 */
public void verifyAMTaskAttempt(JSONObject info, TaskAttempt att,
    TaskType ttype) throws JSONException {
  boolean isReduce = (ttype == TaskType.REDUCE);
  assertEquals("incorrect number of elements",
      isReduce ? 17 : 12, info.length());
  verifyTaskAttemptGeneric(att, ttype, info.getString("id"),
      info.getString("state"), info.getString("type"),
      info.getString("rack"), info.getString("nodeHttpAddress"),
      info.getString("diagnostics"), info.getString("assignedContainerId"),
      info.getLong("startTime"), info.getLong("finishTime"),
      info.getLong("elapsedTime"), (float) info.getDouble("progress"));
  if (isReduce) {
    verifyReduceTaskAttemptGeneric(att, info.getLong("shuffleFinishTime"),
        info.getLong("mergeFinishTime"), info.getLong("elapsedShuffleTime"),
        info.getLong("elapsedMergeTime"), info.getLong("elapsedReduceTime"));
  }
}