This article collects typical code examples of the Java method org.apache.hadoop.mapreduce.Cluster.getJob. If you are wondering what exactly Cluster.getJob does, how to call it, or what real uses of it look like, the hand-picked examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.mapreduce.Cluster.
The following presents 7 code examples of the Cluster.getJob method; by default they are ordered by popularity.
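Before diving into the examples, note the pattern they all share: construct a Cluster from a Configuration, look the job up with getJob, check the result for null (getJob returns null when the cluster does not know the given ID), and close the Cluster when finished. The minimal, self-contained sketch below illustrates this pattern; the class name and the job-ID command-line argument are illustrative assumptions, not taken from any of the examples that follow.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

// Hypothetical driver class, for illustration only.
public class GetJobSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = new Configuration(); // picks up cluster config from the classpath
    Cluster cluster = new Cluster(conf);
    try {
      // args[0] is assumed to be a job ID string, e.g. "job_1409135750325_0001"
      Job job = cluster.getJob(JobID.forName(args[0]));
      if (job == null) {
        // getJob returns null when no job with this ID is known to the cluster
        System.err.println("No job found for " + args[0]);
        return;
      }
      System.out.println("Job state: " + job.getJobState());
    } finally {
      cluster.close(); // release the underlying client connection
    }
  }
}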
Example 1: cancel
import org.apache.hadoop.mapreduce.Cluster; // import the package/class this method depends on
@Override
public void cancel(String jobId) throws IOException {
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw an exception?
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      return;
    }
    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
Example 2: getTaskReports
import org.apache.hadoop.mapreduce.Cluster; // import the package/class this method depends on
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
  if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
    LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
    return null;
  }
  Cluster cluster = new Cluster(job.getJobConf());
  try {
    org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
    if (mrJob == null) { // In local mode, mrJob will be null
      mrJob = job.getJob();
    }
    org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
    return DowngradeHelper.downgradeTaskReports(reports);
  } catch (InterruptedException ir) {
    throw new IOException(ir);
  }
}
Example 3: determineProgresses
import org.apache.hadoop.mapreduce.Cluster; // import the package/class this method depends on
/**
 * @return three progress values, in [0,1], as a {@code float[]}, representing setup, mapper and reducer progress
 */
private float[] determineProgresses() throws IOException, InterruptedException {
  if (exec == null) {
    return null;
  }
  Cluster cluster = new Cluster(getConf());
  try {
    JobID jobID = getJob().getJobID();
    if (jobID == null) {
      return null;
    }
    Job runningJob = cluster.getJob(jobID);
    if (runningJob == null) {
      return null;
    }
    return new float[] { runningJob.setupProgress(), runningJob.mapProgress(), runningJob.reduceProgress() };
  } finally {
    cluster.close();
  }
}
Example 4: getCounters
import org.apache.hadoop.mapreduce.Cluster; // import the package/class this method depends on
public static Counters getCounters(Job job) throws IOException {
  try {
    Cluster cluster = new Cluster(job.getJobConf());
    org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
    if (mrJob == null) { // In local mode, mrJob will be null
      mrJob = job.getJob();
    }
    return new Counters(mrJob.getCounters());
  } catch (Exception ir) {
    throw new IOException(ir);
  }
}
Example 5: stopAllExistingMRJobs
import org.apache.hadoop.mapreduce.Cluster; // import the package/class this method depends on
private void stopAllExistingMRJobs(String blurEnv, Configuration conf) throws YarnException, IOException,
    InterruptedException {
  Cluster cluster = new Cluster(conf);
  JobStatus[] allJobStatuses = cluster.getAllJobStatuses();
  for (JobStatus jobStatus : allJobStatuses) {
    if (jobStatus.isJobComplete()) {
      continue;
    }
    String jobFile = jobStatus.getJobFile();
    JobID jobID = jobStatus.getJobID();
    Job job = cluster.getJob(jobID);
    FileSystem fileSystem = FileSystem.get(job.getConfiguration());
    Configuration configuration = new Configuration(false);
    Path path = new Path(jobFile);
    Path makeQualified = path.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
    if (hasReadAccess(fileSystem, makeQualified)) {
      try (FSDataInputStream in = fileSystem.open(makeQualified)) {
        configuration.addResource(copy(in));
      }
      String jobBlurEnv = configuration.get(BLUR_ENV);
      LOG.info("Checking job [{0}] has env [{1}] current env set to [{2}]", jobID, jobBlurEnv, blurEnv);
      if (blurEnv.equals(jobBlurEnv)) {
        LOG.info("Killing running job [{0}]", jobID);
        job.killJob();
      }
    }
  }
}
Example 6: determineStatus
import org.apache.hadoop.mapreduce.Cluster; // import the package/class this method depends on
private StepStatus determineStatus() throws IOException, InterruptedException {
  JobContext job = getJob();
  if (job == null) {
    return StepStatus.COMPLETED;
  }
  Cluster cluster = new Cluster(getConf());
  try {
    JobID jobID = job.getJobID();
    if (jobID == null) {
      return StepStatus.PENDING;
    }
    Job runningJob = cluster.getJob(jobID);
    if (runningJob == null) {
      return StepStatus.PENDING;
    }
    JobStatus.State state = runningJob.getJobState();
    switch (state) {
      case PREP:
        return StepStatus.PENDING;
      case RUNNING:
        return StepStatus.RUNNING;
      case SUCCEEDED:
        return StepStatus.COMPLETED;
      case FAILED:
        return StepStatus.FAILED;
      case KILLED:
        return StepStatus.CANCELLED;
    }
    throw new IllegalArgumentException("Unknown Hadoop job state " + state);
  } finally {
    cluster.close();
  }
}
Example 7: submitSleepJob
import org.apache.hadoop.mapreduce.Cluster; // import the package/class this method depends on
static Job submitSleepJob(final int numMappers, final int numReducers, final long mapSleepTime,
    final long reduceSleepTime, boolean shouldComplete, String userInfo,
    String queueName, Configuration clientConf) throws IOException,
    InterruptedException, ClassNotFoundException {
  clientConf.set(JTConfig.JT_IPC_ADDRESS, "localhost:"
      + miniMRCluster.getJobTrackerPort());
  UserGroupInformation ugi;
  if (userInfo != null) {
    String[] splits = userInfo.split(",");
    String[] groups = new String[splits.length - 1];
    System.arraycopy(splits, 1, groups, 0, splits.length - 1);
    ugi = UserGroupInformation.createUserForTesting(splits[0], groups);
  } else {
    ugi = UserGroupInformation.getCurrentUser();
  }
  if (queueName != null) {
    clientConf.set(JobContext.QUEUE_NAME, queueName);
  }
  final SleepJob sleep = new SleepJob();
  sleep.setConf(clientConf);
  Job job = ugi.doAs(new PrivilegedExceptionAction<Job>() {
    public Job run() throws IOException {
      return sleep.createJob(numMappers, numReducers, mapSleepTime,
          (int) mapSleepTime, reduceSleepTime, (int) reduceSleepTime);
    }
  });
  if (shouldComplete) {
    job.waitForCompletion(false);
  } else {
    job.submit();
    // miniMRCluster.getJobTrackerRunner().getJobTracker().jobsToComplete()[]
    Cluster cluster = new Cluster(miniMRCluster.createJobConf());
    JobStatus[] status = miniMRCluster.getJobTrackerRunner().getJobTracker()
        .jobsToComplete();
    JobID id = status[status.length - 1].getJobID();
    Job newJob = cluster.getJob(id);
    cluster.close();
    return newJob;
  }
  return job;
}