This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.TypeConverter.fromYarn. If you are wondering what TypeConverter.fromYarn does, how to use it, or where to find examples of it, the curated method samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.TypeConverter.
The following presents 15 code examples of TypeConverter.fromYarn, ordered by popularity.
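Before the individual examples, here is a minimal sketch of the basic round trip between the YARN and MapReduce ID types that most of the examples rely on; the TypeConverter calls are the ones used below (see Examples 8 and 14), while the ApplicationId setup is purely illustrative:

// YARN ApplicationId -> MapReduce JobID -> YARN JobId
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
org.apache.hadoop.mapreduce.JobID oldJobId = TypeConverter.fromYarn(appId);
org.apache.hadoop.mapreduce.v2.api.records.JobId newJobId = TypeConverter.toYarn(oldJobId);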
Example 1: handle
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@Override
public void handle(ContainerAllocatorEvent event) {
  ContainerId cId =
      ContainerId.newContainerId(getContext().getApplicationAttemptId(),
          containerCount++);
  NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT);
  Resource resource = Resource.newInstance(1234, 2, 2);
  ContainerTokenIdentifier containerTokenIdentifier =
      new ContainerTokenIdentifier(cId, nodeId.toString(), "user",
          resource, System.currentTimeMillis() + 10000, 42, 42,
          Priority.newInstance(0), 0);
  Token containerToken = newContainerToken(nodeId, "password".getBytes(),
      containerTokenIdentifier);
  Container container = Container.newInstance(cId, nodeId,
      NM_HOST + ":" + NM_HTTP_PORT, resource, null, containerToken);
  JobID id = TypeConverter.fromYarn(applicationId);
  JobId jobId = TypeConverter.toYarn(id);
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.REDUCE, 100)));
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.MAP, 100)));
  getContext().getEventHandler().handle(
      new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
          container, null));
}
Example 2: getJobCounters
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
public org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0)
    throws IOException, InterruptedException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter.toYarn(arg0);
  GetCountersRequest request = recordFactory.newRecordInstance(GetCountersRequest.class);
  request.setJobId(jobID);
  Counters cnt = ((GetCountersResponse)
      invoke("getCounters", GetCountersRequest.class, request)).getCounters();
  return TypeConverter.fromYarn(cnt);
}
Example 3: createTaskAttemptUnsuccessfulCompletionEvent
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
private static TaskAttemptUnsuccessfulCompletionEvent
    createTaskAttemptUnsuccessfulCompletionEvent(TaskAttemptImpl taskAttempt,
        TaskAttemptStateInternal attemptState) {
  TaskAttemptUnsuccessfulCompletionEvent tauce =
      new TaskAttemptUnsuccessfulCompletionEvent(
          TypeConverter.fromYarn(taskAttempt.attemptId),
          TypeConverter.fromYarn(
              taskAttempt.attemptId.getTaskId().getTaskType()),
          attemptState.toString(),
          taskAttempt.finishTime,
          taskAttempt.container == null ? "UNKNOWN"
              : taskAttempt.container.getNodeId().getHost(),
          taskAttempt.container == null ? -1
              : taskAttempt.container.getNodeId().getPort(),
          taskAttempt.nodeRackName == null ? "UNKNOWN"
              : taskAttempt.nodeRackName,
          StringUtils.join(LINE_SEPARATOR, taskAttempt.getDiagnostics()),
          taskAttempt.getCounters(),
          taskAttempt.getProgressSplitBlock().burst());
  return tauce;
}
Example 4: sendLaunchedEvents
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce =
      new JobCounterUpdateEvent(attemptId.getTaskId().getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);
  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
      new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
          TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
          launchTime, trackerName, httpPort, shufflePort, container.getId(),
          locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Example 5: createTaskFailedEvent
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
private static TaskFailedEvent createTaskFailedEvent(TaskImpl task,
    List<String> diag, TaskStateInternal taskState, TaskAttemptId taId) {
  StringBuilder errorSb = new StringBuilder();
  if (diag != null) {
    for (String d : diag) {
      errorSb.append(", ").append(d);
    }
  }
  TaskFailedEvent taskFailedEvent = new TaskFailedEvent(
      TypeConverter.fromYarn(task.taskId),
      // Hack since getFinishTime needs isFinished to be true and that
      // doesn't happen till after the transition.
      task.getFinishTime(taId),
      TypeConverter.fromYarn(task.getType()),
      errorSb.toString(),
      taskState.toString(),
      taId == null ? null : TypeConverter.fromYarn(taId),
      task.getCounters());
  return taskFailedEvent;
}
Example 6: getTask
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@Override
public Task getTask(TaskId taskId) {
  if (tasksLoaded.get()) {
    return tasks.get(taskId);
  } else {
    TaskID oldTaskId = TypeConverter.fromYarn(taskId);
    CompletedTask completedTask =
        new CompletedTask(taskId, jobInfo.getAllTasks().get(oldTaskId));
    return completedTask;
  }
}
Example 7: createRemoteTask
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@Override
public Task createRemoteTask() {
  // job file name is set in TaskAttempt, setting it null here
  MapTask mapTask =
      new MapTask("", TypeConverter.fromYarn(getID()), partition,
          splitInfo.getSplitIndex(), 1); // YARN doesn't have the concept of
                                         // slots per task, set it as 1.
  mapTask.setUser(conf.get(MRJobConfig.USER_NAME));
  mapTask.setConf(conf);
  return mapTask;
}
Example 8: getAllJobs
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@Override
public Map<JobId, Job> getAllJobs(ApplicationId appID) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Called getAllJobs(AppId): " + appID);
  }
  // currently there is a 1-to-1 mapping between app and job id
  org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID);
  Map<JobId, Job> jobs = new HashMap<JobId, Job>();
  JobId jobID = TypeConverter.toYarn(oldJobID);
  jobs.put(jobID, getJob(jobID));
  return jobs;
}
Example 9: transition
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  TaskAttemptContext taskContext =
      new TaskAttemptContextImpl(taskAttempt.conf,
          TypeConverter.fromYarn(taskAttempt.attemptId));
  taskAttempt.eventHandler.handle(new CommitterTaskAbortEvent(
      taskAttempt.attemptId, taskContext));
}
Example 10: sendTaskStartedEvent
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
private void sendTaskStartedEvent() {
  TaskStartedEvent tse = new TaskStartedEvent(
      TypeConverter.fromYarn(taskId), getLaunchTime(),
      TypeConverter.fromYarn(taskId.getTaskType()),
      getSplitsAsString());
  eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tse));
  historyTaskStartGenerated = true;
}
Example 11: createTaskFinishedEvent
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
private static TaskFinishedEvent createTaskFinishedEvent(TaskImpl task,
    TaskStateInternal taskState) {
  TaskFinishedEvent tfe =
      new TaskFinishedEvent(TypeConverter.fromYarn(task.taskId),
          TypeConverter.fromYarn(task.successfulAttempt),
          task.getFinishTime(task.successfulAttempt),
          TypeConverter.fromYarn(task.taskId.getTaskType()),
          taskState.toString(),
          task.getCounters());
  return tfe;
}
Example 12: getMapAttemptCompletionEvents
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@Override
public synchronized TaskCompletionEvent[] getMapAttemptCompletionEvents(
    int startIndex, int maxEvents) {
  if (mapCompletionEvents == null) {
    constructTaskAttemptCompletionEvents();
  }
  return TypeConverter.fromYarn(getAttemptCompletionEvents(
      mapCompletionEvents, startIndex, maxEvents));
}
Example 13: getJobIDFromHistoryFilePath
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
/**
 * Returns the JobID from a job history file name.
 * @param pathString the path string.
 * @return the JobID
 * @throws IOException if the filename format is invalid.
 */
public static JobID getJobIDFromHistoryFilePath(String pathString)
    throws IOException {
  String[] parts = pathString.split(Path.SEPARATOR);
  String fileNamePart = parts[parts.length - 1];
  JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fileNamePart);
  return TypeConverter.fromYarn(jobIndexInfo.getJobId());
}
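For illustration, a possible call to this helper; the history file path below is hypothetical, since the actual file name encoding is produced by FileNameIndexUtils and varies across Hadoop versions:

// Hypothetical .jhist path; getIndexInfo throws IOException on a malformed name.
String pathString = "/mr-history/done/2015/01/01/000000/"
    + "job_1420070400000_0001-1420070500000-alice-wordcount"
    + "-1420070600000-1-1-SUCCEEDED-default-1420070510000.jhist";
JobID jobId = getJobIDFromHistoryFilePath(pathString); // job_1420070400000_0001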
Example 14: setUp
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@Before
public void setUp() throws Exception {
  resourceMgrDelegate = mock(ResourceMgrDelegate.class);
  conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
  clientCache = new ClientCache(conf, resourceMgrDelegate);
  clientCache = spy(clientCache);
  yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
  yarnRunner = spy(yarnRunner);
  submissionContext = mock(ApplicationSubmissionContext.class);
  doAnswer(new Answer<ApplicationSubmissionContext>() {
    @Override
    public ApplicationSubmissionContext answer(InvocationOnMock invocation)
        throws Throwable {
      return submissionContext;
    }
  }).when(yarnRunner).createApplicationSubmissionContext(
      any(Configuration.class), any(String.class), any(Credentials.class));
  appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  jobId = TypeConverter.fromYarn(appId);
  if (testWorkDir.exists()) {
    FileContext.getLocalFSFileContext()
        .delete(new Path(testWorkDir.toString()), true);
  }
  testWorkDir.mkdirs();
}
Example 15: checkTaskStateTypeConversion
import org.apache.hadoop.mapreduce.TypeConverter; // import for the package/class this method depends on
@Test
public void checkTaskStateTypeConversion() {
  // Verify that all states can be converted without throwing an exception.
  for (TaskState state : TaskState.values()) {
    TypeConverter.fromYarn(state);
  }
}
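As a closing sketch that is not part of the collected examples, the same exhaustive-check idea extends naturally to a round trip over the attempt IDs used throughout this page; the toYarn/fromYarn pairing is assumed to be symmetric here, and comparing string forms sidesteps class-identity details in ID.equals:

@Test
public void checkTaskAttemptIdRoundTrip() {
  // Use a numeric jtIdentifier: TypeConverter maps it to a cluster timestamp.
  TaskAttemptID attemptId = new TaskAttemptID(
      new TaskID(new JobID("1420070400000", 1), TaskType.MAP, 0), 0);
  TaskAttemptId yarnId = TypeConverter.toYarn(attemptId);
  assertEquals(attemptId.toString(), TypeConverter.fromYarn(yarnId).toString());
}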