当前位置: 首页>>代码示例>>Java>>正文


Java Cluster.getJob方法代码示例

本文整理汇总了Java中org.apache.hadoop.mapreduce.Cluster.getJob方法的典型用法代码示例。如果您正苦于以下问题:Java Cluster.getJob方法的具体用法?Java Cluster.getJob怎么用?Java Cluster.getJob使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.mapreduce.Cluster的用法示例。


在下文中一共展示了Cluster.getJob方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: cancel

import org.apache.hadoop.mapreduce.Cluster; //导入方法依赖的package包/类
@Override
public void cancel(String jobId) throws IOException {
  // Look up the job on the cluster by ID and kill it if it is still running.
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw exception
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      // Already finished or archived — nothing to cancel.
      return;
    }

    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    // Restore the interrupt flag so callers can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  } finally {
    // FIX: the original leaked the Cluster's RPC client; always release it.
    cluster.close();
  }
}
 
开发者ID:apache,项目名称:hbase,代码行数:22,代码来源:MapReduceBackupCopyJob.java

示例2: getTaskReports

import org.apache.hadoop.mapreduce.Cluster; //导入方法依赖的package包/类
/**
 * Fetches the task reports of the given type for a job, or {@code null} when
 * reports are disabled via {@code PigConfiguration.PIG_NO_TASK_REPORT}.
 *
 * @param job  the Pig job wrapper whose underlying MR job is queried
 * @param type which task type (MAP/REDUCE/...) to report on
 * @return downgraded task reports, or {@code null} if reporting is disabled
 * @throws IOException if the cluster query is interrupted or fails
 */
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    // Task reports can be expensive on large jobs, so honor the opt-out flag.
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
        return DowngradeHelper.downgradeTaskReports(reports);
    } catch (InterruptedException ir) {
        // Restore the interrupt flag before translating to IOException.
        Thread.currentThread().interrupt();
        throw new IOException(ir);
    } finally {
        // FIX: the original leaked the Cluster's RPC client; always release it.
        cluster.close();
    }
}
 
开发者ID:sigmoidanalytics,项目名称:spork,代码行数:18,代码来源:HadoopShims.java

示例3: determineProgresses

import org.apache.hadoop.mapreduce.Cluster; //导入方法依赖的package包/类
/**
 * @return three progress values, in [0,1], as a {@code float[]}, representing setup, mapper and reducer progress
 */
private float[] determineProgresses() throws IOException, InterruptedException {
  // Nothing has been submitted yet, so there is no progress to report.
  if (exec == null) {
    return null;
  }
  Cluster cluster = new Cluster(getConf());
  try {
    JobID id = getJob().getJobID();
    // A null ID means the job was never assigned one; a null handle means
    // the cluster no longer tracks it. Either way there is nothing to read.
    Job tracked = (id == null) ? null : cluster.getJob(id);
    if (tracked == null) {
      return null;
    }
    return new float[] {
        tracked.setupProgress(),
        tracked.mapProgress(),
        tracked.reduceProgress(),
    };
  } finally {
    cluster.close();
  }
}
 
开发者ID:apsaltis,项目名称:oryx,代码行数:24,代码来源:JobStep.java

示例4: getCounters

import org.apache.hadoop.mapreduce.Cluster; //导入方法依赖的package包/类
/**
 * Fetches the counters for the given job from the cluster, falling back to
 * the locally-held job handle when running in local mode.
 *
 * @param job the Pig job wrapper whose counters are requested
 * @return a Pig {@code Counters} view of the MR job's counters
 * @throws IOException wrapping any failure while querying the cluster
 */
public static Counters getCounters(Job job) throws IOException {
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        return new Counters(mrJob.getCounters());
    } catch (Exception ir) {
        throw new IOException(ir);
    } finally {
        // FIX: the original leaked the Cluster's RPC client; always release it.
        cluster.close();
    }
}
 
开发者ID:sigmoidanalytics,项目名称:spork,代码行数:13,代码来源:HadoopShims.java

示例5: stopAllExistingMRJobs

import org.apache.hadoop.mapreduce.Cluster; //导入方法依赖的package包/类
/**
 * Kills every incomplete MR job whose submitted job configuration carries the
 * same Blur environment tag as {@code blurEnv}.
 *
 * @param blurEnv the environment identifier to match against each job's conf
 * @param conf    client configuration used to reach the cluster
 */
private void stopAllExistingMRJobs(String blurEnv, Configuration conf) throws YarnException, IOException,
    InterruptedException {
  Cluster cluster = new Cluster(conf);
  try {
    JobStatus[] allJobStatuses = cluster.getAllJobStatuses();
    for (JobStatus jobStatus : allJobStatuses) {
      if (jobStatus.isJobComplete()) {
        continue;
      }
      String jobFile = jobStatus.getJobFile();
      JobID jobID = jobStatus.getJobID();
      Job job = cluster.getJob(jobID);
      FileSystem fileSystem = FileSystem.get(job.getConfiguration());
      // Load the submitted job.xml to inspect its BLUR_ENV property.
      Configuration configuration = new Configuration(false);
      Path path = new Path(jobFile);
      Path makeQualified = path.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
      if (hasReadAccess(fileSystem, makeQualified)) {
        try (FSDataInputStream in = fileSystem.open(makeQualified)) {
          configuration.addResource(copy(in));
        }
        String jobBlurEnv = configuration.get(BLUR_ENV);
        LOG.info("Checking job [{0}] has env [{1}] current env set to [{2}]", jobID, jobBlurEnv, blurEnv);
        // Only kill jobs tagged with the same environment as ours.
        if (blurEnv.equals(jobBlurEnv)) {
          LOG.info("Killing running job [{0}]", jobID);
          job.killJob();
        }
      }
    }
  } finally {
    // FIX: the original leaked the Cluster's RPC client; always release it.
    cluster.close();
  }
}
 
开发者ID:apache,项目名称:incubator-blur,代码行数:29,代码来源:ClusterDriver.java

示例6: determineStatus

import org.apache.hadoop.mapreduce.Cluster; //导入方法依赖的package包/类
/**
 * Maps the underlying Hadoop job's state onto this step's {@link StepStatus}.
 *
 * @return the current step status; {@code COMPLETED} when no job exists,
 *     {@code PENDING} when the job has no ID yet or the cluster doesn't know it
 */
private StepStatus determineStatus() throws IOException, InterruptedException {
  JobContext job = getJob();
  if (job == null) {
    // No job was ever created for this step; treat it as already finished.
    return StepStatus.COMPLETED;
  }
  Cluster cluster = new Cluster(getConf());
  try {
    JobID id = job.getJobID();
    if (id == null) {
      // Not yet submitted — no ID has been assigned.
      return StepStatus.PENDING;
    }
    Job tracked = cluster.getJob(id);
    if (tracked == null) {
      // The cluster doesn't know this job yet.
      return StepStatus.PENDING;
    }
    JobStatus.State state = tracked.getJobState();
    // Translate each Hadoop state into the step-level equivalent.
    switch (state) {
      case PREP:
        return StepStatus.PENDING;
      case RUNNING:
        return StepStatus.RUNNING;
      case SUCCEEDED:
        return StepStatus.COMPLETED;
      case FAILED:
        return StepStatus.FAILED;
      case KILLED:
        return StepStatus.CANCELLED;
    }
    // Defensive: reached only if Hadoop adds a new state this code predates.
    throw new IllegalArgumentException("Unknown Hadoop job state " + state);
  } finally {
    cluster.close();
  }
}
 
开发者ID:apsaltis,项目名称:oryx,代码行数:34,代码来源:JobStep.java

示例7: submitSleepJob

import org.apache.hadoop.mapreduce.Cluster; //导入方法依赖的package包/类
// Submits a SleepJob to the MiniMR cluster, optionally as a specific test user
// and into a specific queue. When shouldComplete is true, blocks until the job
// finishes; otherwise submits asynchronously and returns a cluster-side handle
// to the just-submitted job.
static Job submitSleepJob(final int numMappers, final int numReducers, final long mapSleepTime,
    final long reduceSleepTime, boolean shouldComplete, String userInfo,
    String queueName, Configuration clientConf) throws IOException,
    InterruptedException, ClassNotFoundException {
  // Point the client at the in-process MiniMR JobTracker.
  clientConf.set(JTConfig.JT_IPC_ADDRESS, "localhost:"
      + miniMRCluster.getJobTrackerPort());
  UserGroupInformation ugi;
  if (userInfo != null) {
    // userInfo is "user,group1,group2,..." — first token is the user, the rest are groups.
    String[] splits = userInfo.split(",");
    String[] groups = new String[splits.length - 1];
    System.arraycopy(splits, 1, groups, 0, splits.length - 1);
    ugi = UserGroupInformation.createUserForTesting(splits[0], groups);
  } else {
    ugi = UserGroupInformation.getCurrentUser();
  }
  if (queueName != null) {
    clientConf.set(JobContext.QUEUE_NAME, queueName);
  }
  final SleepJob sleep = new SleepJob();
  sleep.setConf(clientConf);
  
  // Create the job as the chosen user so queue ACLs are evaluated against that identity.
  Job job = ugi.doAs(new PrivilegedExceptionAction<Job>() {
      public Job run() throws IOException {
        return sleep.createJob(numMappers, numReducers, mapSleepTime,
            (int) mapSleepTime, reduceSleepTime, (int) reduceSleepTime);
    }});
  if (shouldComplete) {
    job.waitForCompletion(false);
  } else {
    // Submit without waiting, then re-fetch the handle through the Cluster
    // client API so the caller gets a tracker-side view of the running job.
    job.submit();
    // miniMRCluster.getJobTrackerRunner().getJobTracker().jobsToComplete()[]
    Cluster cluster = new Cluster(miniMRCluster.createJobConf());
    JobStatus[] status = miniMRCluster.getJobTrackerRunner().getJobTracker()
        .jobsToComplete();
    // NOTE(review): assumes the most recently submitted job is last in the
    // jobsToComplete() array — confirm this ordering holds for the MiniMR tracker.
    JobID id = status[status.length -1].getJobID();
    Job newJob = cluster.getJob(id);
    cluster.close();
    return newJob;
  }
  return job;
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:42,代码来源:QueueManagerTestUtils.java


注:本文中的org.apache.hadoop.mapreduce.Cluster.getJob方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。