This article collects typical usage examples of the Java class org.apache.hadoop.mapred.JobStatus. If you have been wondering what JobStatus is for, how to use it, or where to find working examples of it, the curated snippets below should help.
JobStatus belongs to the org.apache.hadoop.mapred package. The 14 code examples that follow are ordered by popularity by default.
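Before the examples, here is a minimal self-contained sketch of the most common pattern around this class: fetching every job through a JobClient and rendering each numeric run state as a readable string. It assumes a cluster reachable through the default JobConf, and the class name JobStatusProbe is purely illustrative.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;

public class JobStatusProbe {
  public static void main(String[] args) throws Exception {
    // Connect to whatever cluster the default configuration points at.
    JobClient client = new JobClient(new JobConf());
    for (JobStatus status : client.getAllJobs()) {
      // getJobRunState maps the int constant (PREP, RUNNING, ...) to a string.
      System.out.println(status.getJobID() + " -> "
          + JobStatus.getJobRunState(status.getRunState()));
    }
  }
}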
Example 1: printJobAnalysis
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
private void printJobAnalysis() {
if (!job.getJobStatus().equals(JobStatus.getJobRunState(JobStatus.SUCCEEDED))) {
System.out.println("No Analysis available as job did not finish");
return;
}
AnalyzedJob avg = new AnalyzedJob(job);
System.out.println("\nAnalysis");
System.out.println("=========");
printAnalysis(avg.getMapTasks(), cMap, "map", avg.getAvgMapTime(), 10);
printLast(avg.getMapTasks(), "map", cFinishMapRed);
if (avg.getReduceTasks().length > 0) {
printAnalysis(avg.getReduceTasks(), cShuffle, "shuffle",
avg.getAvgShuffleTime(), 10);
printLast(avg.getReduceTasks(), "shuffle", cFinishShuffle);
printAnalysis(avg.getReduceTasks(), cReduce, "reduce",
avg.getAvgReduceTime(), 10);
printLast(avg.getReduceTasks(), "reduce", cFinishMapRed);
}
System.out.println("=========");
}
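Note that the guard compares status strings rather than int constants: the parsed job's getJobStatus() returns the string form produced by JobStatus.getJobRunState (Example 14 below shows exactly that string being written), so SUCCEEDED is converted to its string form before the comparison.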
Example 2: parse
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
/**
* Only used for unit tests.
*/
@Private
public synchronized JobInfo parse(EventReader reader) throws IOException {
if (info != null) {
return info;
}
info = new JobInfo();
parse(reader, this);
if (info.getJobStatus() == null) {
info.jobStatus = JobStatus.getJobRunState(JobStatus.FAILED);
if (info.getErrorInfo() == null || info.getErrorInfo().equals("")) {
info.errorInfo = "Application failed due to failed ApplicationMaster.\n"
+ "Only partial information is available; some values may be "
+ "inaccurate.";
}
}
if (info.getFinishTime() == -1L) {
info.finishTime = info.getLaunchTime();
}
return info;
}
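For orientation: this method lives in JobHistoryParser, and the EventReader overload shown above is the unit-test entry point. Below is a hedged sketch of the ordinary public entry point; the history-file path is a placeholder, not from the source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

// Placeholder path; point it at a real .jhist file on your cluster.
Path histFile = new Path("/tmp/history/job_sample.jhist");
FileSystem fs = histFile.getFileSystem(new Configuration());
JobHistoryParser parser = new JobHistoryParser(fs, histFile);
JobHistoryParser.JobInfo info = parser.parse();
// If the ApplicationMaster died before writing a finish event, this prints
// the FAILED string set by the fallback in the method above.
System.out.println(info.getJobStatus());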
Example 3: isJobStarted
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
/**
* Checks whether the job has started.
* @param id job id
* @return true if the job reached the RUNNING state within the timeout.
* @throws IOException if an I/O error occurs.
*/
public boolean isJobStarted(JobID id) throws IOException {
JobInfo jInfo = getJobInfo(id);
int counter = 0;
while (counter < 60) {
if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
break;
} else {
UtilsForTests.waitFor(1000);
jInfo = getJobInfo(jInfo.getID());
Assert.assertNotNull("Job information is null", jInfo);
}
counter++;
}
return counter != 60;
}
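The same polling idea can be expressed against the public JobClient API for readers outside this test harness. A hedged sketch, assuming a connected JobClient; the 60 x 1s budget mirrors the method above.

import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;

static boolean waitUntilRunning(JobClient client, JobID id)
    throws IOException, InterruptedException {
  for (int i = 0; i < 60; i++) {
    RunningJob job = client.getJob(id);
    // getJobState() returns the int constants defined on JobStatus.
    if (job != null && job.getJobState() == JobStatus.RUNNING) {
      return true;
    }
    Thread.sleep(1000L);
  }
  return false;
}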
Example 4: update
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
/**
* Updates this job status according to the given JobStatus.
*
* @param status the freshly fetched status of the underlying RunningJob
*/
void update(JobStatus status) {
this.status = status;
try {
this.counters = running.getCounters();
this.completed = running.isComplete();
this.successful = running.isSuccessful();
this.mapProgress = running.mapProgress();
this.reduceProgress = running.reduceProgress();
// running.getTaskCompletionEvents(fromEvent);
} catch (IOException ioe) {
ioe.printStackTrace();
}
this.completedMaps = (int) (this.totalMaps * this.mapProgress);
this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
}
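Note that completedMaps and completedReduces are estimated by scaling the task totals by the reported progress fractions rather than queried from the cluster, so both counts are approximations that only converge on the true values as the job finishes.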
Example 5: submitJob
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
@Override
public synchronized JobStatus submitJob(JobID jobId) throws IOException {
boolean loggingEnabled = LOG.isDebugEnabled();
if (loggingEnabled) {
LOG.debug("submitJob for jobname = " + jobId);
}
if (jobs.containsKey(jobId)) {
// job already running, don't start twice
if (loggingEnabled) {
LOG.debug("Job '" + jobId.getId() + "' already present ");
}
return jobs.get(jobId).getStatus();
}
JobStory jobStory = SimulatorJobCache.get(jobId);
if (jobStory == null) {
throw new IllegalArgumentException("Job not found in SimulatorJobCache: " + jobId);
}
validateAndSetClock(jobStory.getSubmissionTime());
SimulatorJobInProgress job = new SimulatorJobInProgress(jobId, this,
this.conf,
jobStory);
return addJob(jobId, job);
}
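Two guards in this simulator method are worth noting: resubmitting an already-known jobId is idempotent (the existing status is returned rather than starting the job twice), and a jobId with no backing JobStory in SimulatorJobCache is rejected with an exception instead of silently producing an empty job.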
Example 6: finalizeJob
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
/**
* Safely cleans up all data structures at the end of the
* job (success/failure/killed). In addition to performing the tasks that the
* original finalizeJob does, we also inform the SimulatorEngine about the
* completion of this job.
*
* @param job completed job.
*/
@Override
synchronized void finalizeJob(JobInProgress job) {
// Let the SimulatorEngine know that the job is done
JobStatus cloneStatus = (JobStatus)job.getStatus().clone();
engine.markCompletedJob(cloneStatus,
SimulatorJobTracker.getClock().getTime());
JobID jobId = job.getStatus().getJobID();
LOG.info("Finished job " + jobId + " endtime = " +
getClock().getTime() + " with status: " +
JobStatus.getJobRunState(job.getStatus().getRunState()));
// for updating the metrics and JobHistory, invoke the original
// finalizeJob.
super.finalizeJob(job);
// now placing this job in queue for future nuking
cleanupJob(job);
}
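The status is cloned before being handed to the SimulatorEngine, presumably so that later mutations of the live JobStatus inside the tracker cannot retroactively alter the completion snapshot the engine recorded.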
Example 7: convertMapreduceState
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
/**
* Convert map-reduce specific job status constants to Sqoop job status
* constants.
*
* @param status Map-reduce job constant
* @return Equivalent submission status
*/
private SubmissionStatus convertMapreduceState(int status) {
if (status == JobStatus.PREP) {
return SubmissionStatus.BOOTING;
} else if (status == JobStatus.RUNNING) {
return SubmissionStatus.RUNNING;
} else if (status == JobStatus.FAILED) {
return SubmissionStatus.FAILED;
} else if (status == JobStatus.KILLED) {
return SubmissionStatus.FAILED;
} else if (status == JobStatus.SUCCEEDED) {
return SubmissionStatus.SUCCEEDED;
}
throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0004,
"Unknown status " + status);
}
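Because JobStatus.PREP, RUNNING, FAILED, KILLED, and SUCCEEDED are plain int constants, the same mapping can also be written as a switch. A behavior-preserving sketch of that alternative follows; note how KILLED deliberately collapses into FAILED.

private SubmissionStatus convertMapreduceState(int status) {
  switch (status) {
    case JobStatus.PREP:      return SubmissionStatus.BOOTING;
    case JobStatus.RUNNING:   return SubmissionStatus.RUNNING;
    case JobStatus.FAILED:    // fall through: a killed job is reported as failed
    case JobStatus.KILLED:    return SubmissionStatus.FAILED;
    case JobStatus.SUCCEEDED: return SubmissionStatus.SUCCEEDED;
    default:
      throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0004,
          "Unknown status " + status);
  }
}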
Example 8: run
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
/** {@inheritDoc} */
@Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
JobContext jobCtx = ctx.jobContext();
try {
OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();
if (abort)
committer.abortJob(jobCtx, JobStatus.State.FAILED);
else
committer.commitJob(jobCtx);
}
catch (IOException e) {
throw new IgniteCheckedException(e);
}
}
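One detail that is easy to miss: FAILED here is not the int constant used elsewhere on this page but the nested JobStatus.State enum, which org.apache.hadoop.mapred.JobStatus exposes through its org.apache.hadoop.mapreduce.JobStatus parent class; the new-API OutputCommitter.abortJob signature expects that enum.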
Example 9: getJobDetails
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
public Map<String, Object> getJobDetails(JobClient jobClient, String jobId)
throws AnkushException {
String errMsg = "Unable to fetch Hadoop job details, could not connect to Hadoop JobClient.";
try {
if (jobClient != null) {
// Get the jobs that are submitted.
JobStatus[] jobStatus = jobClient.getAllJobs();
for (JobStatus jobSts : jobStatus) {
}
}
} catch (Exception e) {
HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg,
Constant.Component.Name.HADOOP, e);
throw new AnkushException(errMsg);
}
return null;
}
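As extracted, the loop body is empty and the method always returns null, so the snippet mainly demonstrates fetching the JobStatus[] array via jobClient.getAllJobs(). Below is a hypothetical completion; the helper name and map keys are purely illustrative, not from the original source.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobStatus;

// Hypothetical completion: collect per-job details keyed by job id.
private Map<String, Object> collectJobDetails(JobClient jobClient) throws IOException {
  Map<String, Object> details = new HashMap<String, Object>();
  for (JobStatus jobSts : jobClient.getAllJobs()) {
    Map<String, Object> one = new HashMap<String, Object>();
    one.put("state", JobStatus.getJobRunState(jobSts.getRunState()));
    one.put("mapProgress", jobSts.mapProgress());       // float in [0, 1]
    one.put("reduceProgress", jobSts.reduceProgress()); // float in [0, 1]
    details.put(jobSts.getJobID().toString(), one);
  }
  return details;
}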
Example 10: JobInfoImpl
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
public JobInfoImpl(
JobID id, boolean setupLaunched, boolean setupFinished,
boolean cleanupLaunched, int runningMaps, int runningReduces,
int waitingMaps, int waitingReduces, int finishedMaps,
int finishedReduces, JobStatus status, String historyUrl,
List<String> blackListedTracker, boolean isComplete, int numMaps,
int numReduces, boolean historyCopied) {
super();
this.blackListedTracker = blackListedTracker;
this.historyUrl = historyUrl;
this.id = id;
this.setupLaunched = setupLaunched;
this.setupFinished = setupFinished;
this.cleanupLaunched = cleanupLaunched;
this.status = status;
this.runningMaps = runningMaps;
this.runningReduces = runningReduces;
this.waitingMaps = waitingMaps;
this.waitingReduces = waitingReduces;
this.finishedMaps = finishedMaps;
this.finishedReduces = finishedReduces;
this.numMaps = numMaps;
this.numReduces = numReduces;
this.historyCopied = historyCopied;
}
Example 11: ofInt
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
static JobState ofInt(int state) {
if (state == JobStatus.PREP) {
return PREPARE;
}
else if (state == JobStatus.RUNNING) {
return RUNNING;
}
else if (state == JobStatus.FAILED) {
return FAILED;
}
else if (state == JobStatus.SUCCEEDED) {
return SUCCEEDED;
}
else {
return null;
}
}
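Unlike Example 7, which throws for an unrecognized constant, this mapper returns null and leaves the error handling to the caller. Note also that it has no branch for JobStatus.KILLED, so a killed job maps to null as well.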
Example 12: listGridmixJobIDs
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
/**
* Lists the current Gridmix job IDs.
* @param client - job client.
* @param execJobCount - number of executed jobs.
* @return - list of Gridmix job IDs, or null if none were found.
*/
public static List<JobID> listGridmixJobIDs(JobClient client,
int execJobCount) throws IOException {
List<JobID> jobids = new ArrayList<JobID>();
JobStatus [] jobStatus = client.getAllJobs();
int numJobs = jobStatus.length;
for (int index = 1; index <= execJobCount; index++) {
JobStatus js = jobStatus[numJobs - index];
JobID jobid = js.getJobID();
String jobName = js.getJobName();
if (!jobName.equals("GRIDMIX_GENERATE_INPUT_DATA") &&
!jobName.equals("GRIDMIX_GENERATE_DISTCACHE_DATA")) {
jobids.add(jobid);
}
}
return jobids.isEmpty() ? null : jobids;
}
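The loop walks the last execJobCount entries of client.getAllJobs() from the end of the array, which implicitly assumes the statuses are ordered with the most recently submitted jobs last; the two Gridmix data-generation helper jobs are then excluded by name.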
Example 13: testRun_Running
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
@Test
public void testRun_Running() throws Exception {
String jobId = "job_201407251005_0815";
createDefinition("mytest", jobId);
RunningJob job = createJob(jobId, JobStatus.RUNNING);
when(job.getJobState()).thenReturn(JobStatus.RUNNING);
Assert.assertEquals(0, executorService.getQueue().size());
checkAllIndexes();
Assert.assertEquals(1, executorService.getQueue().size());
verify(model, VerificationModeFactory.times(1)).getIndexer(anyString());
verify(model, VerificationModeFactory.times(0)).updateIndexerInternal(any(IndexerDefinition.class));
Thread.sleep(60);
Assert.assertEquals(1, executorService.getQueue().size());
verify(model, VerificationModeFactory.times(2)).getIndexer(anyString());
verify(model, VerificationModeFactory.times(0)).updateIndexerInternal(any(IndexerDefinition.class));
when(job.getJobState()).thenReturn(JobStatus.SUCCEEDED);
Thread.sleep(60);
Assert.assertEquals(0, executorService.getQueue().size());
verify(model, VerificationModeFactory.times(3)).getIndexer(anyString());
verify(model, VerificationModeFactory.times(1)).updateIndexerInternal(any(IndexerDefinition.class));
}
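The mocked getJobState() is what drives the whole test: while it reports JobStatus.RUNNING, each poll re-queues the tracking task (the queue size stays at 1 and updateIndexerInternal is never invoked); once the mock is flipped to JobStatus.SUCCEEDED, the next poll drains the queue and updateIndexerInternal fires exactly once.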
Example 14: handleJobFinishedEvent
import org.apache.hadoop.mapred.JobStatus; // import the required package/class
private void handleJobFinishedEvent(JobFinishedEvent event) {
info.finishTime = event.getFinishTime();
info.finishedMaps = event.getFinishedMaps();
info.finishedReduces = event.getFinishedReduces();
info.failedMaps = event.getFailedMaps();
info.failedReduces = event.getFailedReduces();
info.totalCounters = event.getTotalCounters();
info.mapCounters = event.getMapCounters();
info.reduceCounters = event.getReduceCounters();
info.jobStatus = JobStatus.getJobRunState(JobStatus.SUCCEEDED);
}
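A JobFinishedEvent is only recorded for jobs that completed successfully (unsuccessful completions are reported through a separate history event), which is why the handler can unconditionally set the SUCCEEDED status string; compare Example 2, where a history stream containing no finish event at all is defaulted to FAILED.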