This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.TaskType: what the TaskType class is for, how to use it, and what real-world usage looks like. The curated class examples below should help answer those questions.
TaskType belongs to the org.apache.hadoop.mapreduce package. The following 15 code examples of the TaskType class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
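Before the examples, a minimal sketch of the enum itself may be useful (a standalone demonstration, not taken from any project below; the constants in the comment are those defined by org.apache.hadoop.mapreduce.TaskType):

import org.apache.hadoop.mapreduce.TaskType;

public class TaskTypeDemo {
  public static void main(String[] args) {
    // TaskType enumerates the categories of tasks a MapReduce job runs:
    // MAP, REDUCE, JOB_SETUP, JOB_CLEANUP and TASK_CLEANUP.
    for (TaskType type : TaskType.values()) {
      System.out.println(type.name());
    }
    // The name()/valueOf() round-trip is the pattern most examples below use
    // to serialize the type to a string and rehydrate it later.
    TaskType reduce = TaskType.valueOf("REDUCE");
    System.out.println(reduce == TaskType.REDUCE); // true
  }
}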
Example 1: setDatum
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
public void setDatum(Object oDatum) {
  this.datum = (ReduceAttemptFinished) oDatum;
  this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.taskStatus = datum.taskStatus.toString();
  this.shuffleFinishTime = datum.shuffleFinishTime;
  this.sortFinishTime = datum.sortFinishTime;
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.state = datum.state.toString();
  this.counters = EventReader.fromAvro(datum.counters);
  this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages = AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
Example 2: setDatum
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
public void setDatum(Object odatum) {
  this.datum = (TaskAttemptUnsuccessfulCompletion) odatum;
  this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.status = datum.status.toString();
  this.error = datum.error.toString();
  this.counters = EventReader.fromAvro(datum.counters);
  this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages = AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
Example 3: setDatum
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
public void setDatum(Object oDatum) {
  this.datum = (MapAttemptFinished) oDatum;
  this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.taskStatus = datum.taskStatus.toString();
  this.mapFinishTime = datum.mapFinishTime;
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.state = datum.state.toString();
  this.counters = EventReader.fromAvro(datum.counters);
  this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages = AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
Example 4: setDatum
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
public void setDatum(Object odatum) {
  this.datum = (TaskFailed) odatum;
  this.id = TaskID.forName(datum.taskid.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.error = datum.error.toString();
  this.failedDueToAttempt =
      datum.failedDueToAttempt == null
          ? null
          : TaskAttemptID.forName(datum.failedDueToAttempt.toString());
  this.status = datum.status.toString();
  this.counters = EventReader.fromAvro(datum.counters);
}
Example 5: TaskAttemptStartedEvent
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
/**
 * Create an event to record the start of an attempt
 * @param attemptId Id of the attempt
 * @param taskType Type of task
 * @param startTime Start time of the attempt
 * @param trackerName Name of the Task Tracker where attempt is running
 * @param httpPort The port number of the tracker
 * @param shufflePort The shuffle port number of the container
 * @param containerId The containerId for the task attempt.
 * @param locality The locality of the task attempt
 * @param avataar The avataar of the task attempt
 */
public TaskAttemptStartedEvent(TaskAttemptID attemptId,
    TaskType taskType, long startTime, String trackerName,
    int httpPort, int shufflePort, ContainerId containerId,
    String locality, String avataar) {
  datum.attemptId = new Utf8(attemptId.toString());
  datum.taskid = new Utf8(attemptId.getTaskID().toString());
  datum.startTime = startTime;
  datum.taskType = new Utf8(taskType.name());
  datum.trackerName = new Utf8(trackerName);
  datum.httpPort = httpPort;
  datum.shufflePort = shufflePort;
  datum.containerId = new Utf8(containerId.toString());
  if (locality != null) {
    datum.locality = new Utf8(locality);
  }
  if (avataar != null) {
    datum.avataar = new Utf8(avataar);
  }
}
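A hedged construction sketch for this event. All values are illustrative; the YARN record factories (ApplicationId.newInstance, ApplicationAttemptId.newInstance, ContainerId.newContainerId, from org.apache.hadoop.yarn.api.records) are assumed to be available as in Hadoop 2.6+, and "NODE_LOCAL"/"VIRGIN" mirror the Locality and Avataar enum names:

// Hypothetical IDs; only their shapes matter here.
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
ContainerId containerId = ContainerId.newContainerId(
    ApplicationAttemptId.newInstance(appId, 1), 1L);
TaskAttemptID attemptId = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
TaskAttemptStartedEvent event = new TaskAttemptStartedEvent(
    attemptId, TaskType.MAP, System.currentTimeMillis(), "tracker-host",
    50060, 13562, containerId, "NODE_LOCAL", "VIRGIN");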
Example 6: maskAttemptID
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
/**
 * Mask the job ID part in a {@link TaskAttemptID}.
 *
 * @param attemptId
 *          raw {@link TaskAttemptID} read from trace
 * @return masked {@link TaskAttemptID} with empty {@link JobID}.
 */
private TaskAttemptID maskAttemptID(TaskAttemptID attemptId) {
  JobID jobId = new JobID();
  TaskType taskType = attemptId.getTaskType();
  TaskID taskId = attemptId.getTaskID();
  return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(), taskType,
      taskId.getId(), attemptId.getId());
}
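A usage sketch for the masking helper above (the attempt string is hypothetical; only its format matters). The masked ID keeps the task type, task number and attempt number, but carries the empty JobID:

TaskAttemptID raw = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
TaskAttemptID masked = maskAttemptID(raw);
// masked.getTaskType() is still TaskType.MAP; the job identifier part is now empty.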
Example 7: getSuccessfulAttemptInfo
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
private TaskAttemptInfo getSuccessfulAttemptInfo(TaskType type, int task) {
  TaskAttemptInfo ret;
  for (int i = 0; true; ++i) {
    // Rumen should make up an attempt if it's missing; otherwise this won't
    // work at all. It's hard to discern what is happening in there.
    ret = jobdesc.getTaskAttemptInfo(type, task, i);
    if (ret.getRunState() == TaskStatus.State.SUCCEEDED) {
      break;
    }
  }
  // Defensive check; unreachable as long as the loop above only exits on
  // SUCCEEDED.
  if (ret.getRunState() != TaskStatus.State.SUCCEEDED) {
    LOG.warn("No successful attempts for tasktype " + type + " task " + task);
  }
  return ret;
}
Example 8: testTaskAttemptFinishedEvent
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
/**
 * Test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {
  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RACKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());
  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RACKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);
}
Example 9: getTaskReports
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));
  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
      ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
          request)).getTaskReportList();
  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
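A hypothetical call site (the job ID string is illustrative, and the JobID parameter is assumed to be the old-API org.apache.hadoop.mapred.JobID, matching the TypeConverter.toYarn(oldJobID) call above):

JobID jobId = JobID.forName("job_201002121132_0001");
org.apache.hadoop.mapreduce.TaskReport[] mapReports = getTaskReports(jobId, TaskType.MAP);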
Example 10: createDummyTask
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
private Task createDummyTask(TaskType type) throws IOException,
    ClassNotFoundException, InterruptedException {
  JobConf conf = new JobConf();
  conf.setOutputCommitter(CommitterThatAlwaysRequiresCommit.class);
  Path outDir = new Path(rootDir, "output");
  FileOutputFormat.setOutputPath(conf, outDir);
  JobID jobId = JobID.forName("job_201002121132_0001");
  Task testTask;
  if (type == TaskType.MAP) {
    testTask = new MapTask();
  } else {
    testTask = new ReduceTask();
  }
  testTask.setConf(conf);
  testTask.initialize(conf, jobId, Reporter.NULL, false);
  return testTask;
}
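A sketch of a call site exercising both branches of the helper above (names as defined in the example):

Task mapTask = createDummyTask(TaskType.MAP);       // takes the MapTask branch
Task reduceTask = createDummyTask(TaskType.REDUCE); // takes the ReduceTask branch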
Example 11: testCompletedTaskAttempt
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
/**
 * Test some methods of CompletedTaskAttempt.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {
  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);
  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);
  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Example 12: testTaskCompletionEvent
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
/**
 * test deprecated methods of TaskCompletionEvent
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testTaskCompletionEvent() {
  TaskAttemptID taid = new TaskAttemptID("001", 1, TaskType.REDUCE, 2, 3);
  TaskCompletionEvent template = new TaskCompletionEvent(12, taid, 13, true,
      Status.SUCCEEDED, "httptracker");
  TaskCompletionEvent testEl = TaskCompletionEvent.downgrade(template);
  testEl.setTaskAttemptId(taid);
  testEl.setTaskTrackerHttp("httpTracker");
  testEl.setTaskId("attempt_001_0001_m_000002_04");
  assertEquals("attempt_001_0001_m_000002_4", testEl.getTaskId());
  testEl.setTaskStatus(Status.OBSOLETE);
  assertEquals(Status.OBSOLETE.toString(), testEl.getStatus().toString());
  testEl.setTaskRunTime(20);
  assertEquals(testEl.getTaskRunTime(), 20);
  testEl.setEventId(16);
  assertEquals(testEl.getEventId(), 16);
}
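The setTaskId/getTaskId round-trip above shows the string being parsed into a TaskAttemptID, so the leading zero of the attempt number is not preserved. The same normalization can be reproduced with TaskAttemptID.forName directly:

TaskAttemptID parsed = TaskAttemptID.forName("attempt_001_0001_m_000002_04");
System.out.println(parsed); // prints attempt_001_0001_m_000002_4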
Example 13: createOutputCommitter
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
private org.apache.hadoop.mapreduce.OutputCommitter createOutputCommitter(
    boolean newApiCommitter, JobID jobId, Configuration conf) throws Exception {
  org.apache.hadoop.mapreduce.OutputCommitter committer = null;
  LOG.info("OutputCommitter set in config "
      + conf.get("mapred.output.committer.class"));
  if (newApiCommitter) {
    org.apache.hadoop.mapreduce.TaskID taskId =
        new org.apache.hadoop.mapreduce.TaskID(jobId, TaskType.MAP, 0);
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID =
        new org.apache.hadoop.mapreduce.TaskAttemptID(taskId, 0);
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
        new TaskAttemptContextImpl(conf, taskAttemptID);
    OutputFormat outputFormat =
        ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), conf);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = ReflectionUtils.newInstance(conf.getClass(
        "mapred.output.committer.class", FileOutputCommitter.class,
        org.apache.hadoop.mapred.OutputCommitter.class), conf);
  }
  LOG.info("OutputCommitter is " + committer.getClass().getName());
  return committer;
}
Example 14: merge
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
public static <K extends Object, V extends Object>
    RawKeyValueIterator merge(Configuration conf, FileSystem fs,
        Class<K> keyClass, Class<V> valueClass,
        CompressionCodec codec,
        Path[] inputs, boolean deleteInputs,
        int mergeFactor, Path tmpDir,
        RawComparator<K> comparator, Progressable reporter,
        Counters.Counter readsCounter,
        Counters.Counter writesCounter,
        Progress mergePhase)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, inputs, deleteInputs, codec,
      comparator, reporter, null, TaskType.REDUCE)
      .merge(keyClass, valueClass, mergeFactor, tmpDir,
          readsCounter, writesCounter, mergePhase);
}
Example 15: displayTasks
import org.apache.hadoop.mapreduce.TaskType; // import the required package/class
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state.
 *
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task
 *        (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state)
    throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
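A hypothetical invocation (the Job instance is obtained elsewhere; lowercase "reduce" works because the method upper-cases the type string before the TaskType.valueOf lookup):

displayTasks(job, "reduce", "completed"); // prints attempts of all completed reduce tasks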