Java SleepJob.createJob Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.mapreduce.SleepJob.createJob. If you are unsure what SleepJob.createJob does, or how and where to use it, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.mapreduce.SleepJob itself.


Below are 12 code examples of SleepJob.createJob, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
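Before the examples, here is a minimal, self-contained sketch of the usual call pattern, assuming the parameter order used consistently in the examples below: createJob(numMapper, numReducer, mapSleepTime in ms, mapSleepCount, reduceSleepTime in ms, reduceSleepCount). The class name SleepJobQuickStart and the bare Configuration are illustration placeholders; the tests below obtain their configuration from a mini cluster instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.SleepJob;

public class SleepJobQuickStart {
  public static void main(String[] args) throws Exception {
    // Placeholder configuration; the tests below use mrCluster.getConfig()
    // or cluster.getConf() instead.
    Configuration conf = new Configuration();

    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);

    // 2 maps sleeping 1000 ms (1 record each) and 1 reduce sleeping 500 ms.
    Job job = sleepJob.createJob(2, 1, 1000, 1, 500, 1);

    job.setJarByClass(SleepJob.class); // needed when submitting to a real cluster
    boolean completed = job.waitForCompletion(true);
    System.exit(completed ? 0 : 1);
  }
}

The same pattern recurs in every example below, varying only the task counts and sleep durations; extra steps such as addFileToClassPath(APP_JAR) appear only because the tests run against a MiniMRYarnCluster.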

Example 1: testJobWithNonNormalizedCapabilities

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
/**
 * To ensure nothing is broken after we removed normalization
 * from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestMRAMWithNonNormalizedCapabilities.java

Example 2: runSleepJob

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
private JobID runSleepJob(JobConf conf) throws Exception {
  SleepJob sleep = new SleepJob();
  sleep.setConf(conf);
  Job job = sleep.createJob(1, 10, 1000, 1, 10000, 1);
  job.waitForCompletion(true);
  return job.getJobID();
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 8, Source: TestJobDirCleanup.java

Example 3: testJobSubmission

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
@Test
public void testJobSubmission() throws Exception {
  Configuration conf = new Configuration(cluster.getConf());
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
  rJob = cluster.getJTClient().submitAndVerifyJob(rJob);
  cluster.getJTClient().verifyJobHistory(rJob.getJobID());
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 10, Source: TestCluster.java

Example 4: testFailingJobInitalization

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
/**
 * Test case that checks whether jobs that fail
 * initialization are removed from the
 * {@link CapacityTaskScheduler} waiting queue.
 *
 * @throws Exception
 */
public void testFailingJobInitalization() throws Exception {
  Properties schedulerProps = new Properties();
  Properties clusterProps = new Properties();
  clusterProps.put("mapred.queue.names","default");
  clusterProps.put(TTConfig.TT_MAP_SLOTS, String.valueOf(1));
  clusterProps.put(TTConfig.TT_REDUCE_SLOTS, String.valueOf(1));
  clusterProps.put(JTConfig.JT_TASKS_PER_JOB, String.valueOf(1));
  clusterProps.put(JTConfig.JT_PERSIST_JOBSTATUS, "false");
  // cluster capacity 1 maps, 1 reduces
  startCluster(1, clusterProps, schedulerProps);
  CapacityTaskScheduler scheduler = (CapacityTaskScheduler) getJobTracker()
    .getTaskScheduler();

  AbstractQueue root = scheduler.getRoot();
  root.getChildren().get(0).getQueueSchedulingContext().setCapacityPercent(100);

  JobConf conf = getJobConf();
  conf.setSpeculativeExecution(false);
  conf.setNumTasksToExecutePerJvm(-1);
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  Job job = sleepJob.createJob(3, 3, 1, 1, 1, 1);
  job.waitForCompletion(false);
  assertFalse(
    "The submitted job successfully completed",
    job.isSuccessful());

  JobQueuesManager mgr = scheduler.jobQueuesManager;
  assertEquals(
    "Failed job present in Waiting queue", 0, mgr
      .getJobQueue("default").getWaitingJobCount());
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 40, Source: TestCapacitySchedulerWithJobTracker.java

Example 5: testSleepJobInternal

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
private void testSleepJobInternal(boolean useRemoteJar) throws Exception {
  LOG.info("\n\n\nStarting testSleepJob: useRemoteJar=" + useRemoteJar);

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
             + " not found. Not running test.");
    return;
  }

  Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode applied iff framework == local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");	
  
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);
 
  // job with 3 maps (10s) and numReduces reduces (5s), 1 "record" each:
  Job job = sleepJob.createJob(3, numSleepReducers, 10000, 1, 5000, 1);

  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  if (useRemoteJar) {
    final Path localJar = new Path(
        ClassUtil.findContainingJar(SleepJob.class));
    ConfigUtil.addLink(job.getConfiguration(), "/jobjars",
        localFs.makeQualified(localJar.getParent()).toUri());
    job.setJar("viewfs:///jobjars/" + localJar.getName());
  } else {
    job.setJarByClass(SleepJob.class);
  }
  job.setMaxMapAttempts(1); // speed up failures
  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Assert.assertTrue("Tracking URL was " + trackingUrl +
                    " but didn't Match Job ID " + jobId ,
        trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  verifySleepJobCounters(job);
  verifyTaskProgress(job);
  
  // TODO later:  add explicit "isUber()" checks of some sort (extend
  // JobStatus?)--compare against MRJobConfig.JOB_UBERTASK_ENABLE value
}
 
Developer: naver, Project: hadoop, Lines: 46, Source: TestMRJobs.java

Example 6: testJobClassloader

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
private void testJobClassloader(boolean useCustomClasses) throws IOException,
    InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobClassloader()"
      + " useCustomClasses=" + useCustomClasses);

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
             + " not found. Not running test.");
    return;
  }
  final Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode applied iff framework == local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
  sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
  if (useCustomClasses) {
    // to test AM loading user classes such as output format class, we want
    // to blacklist them from the system classes (they need to be prepended
    // as the first match wins)
    String systemClasses = ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
    // exclude the custom classes from system classes
    systemClasses = "-" + CustomOutputFormat.class.getName() + ",-" +
        CustomSpeculator.class.getName() + "," +
        systemClasses;
    sleepConf.set(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES,
        systemClasses);
  }
  sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
  sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
  final SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);
  final Job job = sleepJob.createJob(1, 1, 10, 1, 10, 1);
  job.setMapperClass(ConfVerificationMapper.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.setJarByClass(SleepJob.class);
  job.setMaxMapAttempts(1); // speed up failures
  if (useCustomClasses) {
    // set custom output format class and speculator class
    job.setOutputFormatClass(CustomOutputFormat.class);
    final Configuration jobConf = job.getConfiguration();
    jobConf.setClass(MRJobConfig.MR_AM_JOB_SPECULATOR, CustomSpeculator.class,
        Speculator.class);
    // speculation needs to be enabled for the speculator to be loaded
    jobConf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
  }
  job.submit();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue("Job status: " + job.getStatus().getFailureInfo(),
      succeeded);
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestMRJobs.java

Example 7: testJobHistoryData

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
@Test (timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException,
    AvroRemoteException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(mrCluster.getConfig());
  // Job with 3 maps and 2 reduces
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.waitForCompletion(true);
  Counters counterMR = job.getCounters();
  JobId jobId = TypeConverter.toYarn(job.getJobID());
  ApplicationId appID = jobId.getAppId();
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;

    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
        .getState())) {
      break;
    }

    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
    .getRMContext().getRMApps().get(appID).getState());
  Counters counterHS = job.getCounters();
  //TODO the Assert below worked. need to check
  //Should we compare each field or convert to V2 counter and compare
  LOG.info("CounterHS " + counterHS);
  LOG.info("CounterMR " + counterMR);
  Assert.assertEquals(counterHS, counterMR);
  
  HSClientProtocol historyClient = instantiateHistoryProxy();
  GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
  gjReq.setJobId(jobId);
  JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
  verifyJobReport(jobReport, jobId);
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestMRJobsWithHistoryService.java

Example 8: testJobWithChangePriority

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
@Test(timeout = 3000000)
public void testJobWithChangePriority() throws Exception {

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode applied if framework
  // equals local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
  sleepConf
      .setInt("yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms", 5);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);
  Job job = sleepJob.createJob(1, 1, 1000, 20, 50, 1);

  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.setJarByClass(SleepJob.class);
  job.setMaxMapAttempts(1); // speed up failures
  job.submit();

  // Set the priority to HIGH
  job.setPriority(JobPriority.HIGH);
  waitForPriorityToUpdate(job, JobPriority.HIGH);
  // Verify the priority from job itself
  Assert.assertEquals(job.getPriority(), JobPriority.HIGH);

  // Change priority to NORMAL (3) with new api
  job.setPriorityAsInteger(3); // Verify the priority from job itself
  waitForPriorityToUpdate(job, JobPriority.NORMAL);
  Assert.assertEquals(job.getPriority(), JobPriority.NORMAL);

  // Change priority to a high integer value with new api
  job.setPriorityAsInteger(89); // Verify the priority from job itself
  waitForPriorityToUpdate(job, JobPriority.UNDEFINED_PRIORITY);
  Assert.assertEquals(job.getPriority(), JobPriority.UNDEFINED_PRIORITY);

  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 46, Source: TestMRJobs.java

Example 9: testJobFailAndKill

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
public void testJobFailAndKill() throws Exception {
  MiniMRCluster mr = null;
  try {
    JobConf jtConf = new JobConf();
    jtConf.set("mapred.jobtracker.instrumentation", 
        JTInstrumentation.class.getName());
    jtConf.set("mapreduce.tasktracker.taskcontroller",
        MockStackDumpTaskController.class.getName());
    mr = new MiniMRCluster(2, "file:///", 3, null, null, jtConf);
    JTInstrumentation instr = (JTInstrumentation) 
      mr.getJobTrackerRunner().getJobTracker().getInstrumentation();

    // run the TCs
    JobConf conf = mr.createJobConf();
    conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50);
    
    Path inDir = new Path(TEST_ROOT_DIR + "/failkilljob/input");
    Path outDir = new Path(TEST_ROOT_DIR + "/failkilljob/output");
    RunningJob runningJob = UtilsForTests.runJobFail(conf, inDir, outDir);
    // Checking that the Job got failed
    assertEquals(runningJob.getJobState(), JobStatus.FAILED);
    assertTrue(instr.verifyJob());
    assertEquals(1, instr.failed);
    instr.reset();

    int prevNumDumps = MockStackDumpTaskController.numStackDumps;
    runningJob = UtilsForTests.runJobKill(conf, inDir, outDir);
    // Checking that the Job got killed
    assertTrue(runningJob.isComplete());
    assertEquals(runningJob.getJobState(), JobStatus.KILLED);
    assertTrue(instr.verifyJob());
    assertEquals(1, instr.killed);
    // check that job kill does not put a stacktrace in task logs.
    checkForStackDump(false, prevNumDumps);

    // Test that a task that times out does have a stack trace
    conf = mr.createJobConf();
    conf.setInt(JobContext.TASK_TIMEOUT, 10000);
    conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
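    // single map sleeping 30 s in total, well past the 10 s TASK_TIMEOUT
    // configured above, so the attempt times out and dumps a stack trace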
    Job job = sleepJob.createJob(1, 0, 30000, 1, 0, 0);
    job.setMaxMapAttempts(1);
    prevNumDumps = MockStackDumpTaskController.numStackDumps;
    job.waitForCompletion(true);
    checkForStackDump(true, prevNumDumps);
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 53, Source: TestJobKillAndFail.java

Example 10: testFilePermission

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
@Test
public void testFilePermission() throws Exception {
  wovenClient = cluster.getJTClient().getProxy();
  Configuration conf = new Configuration(cluster.getConf());
  FinishTaskControlAction.configureControlActionForJob(conf);
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job slpJob = job.createJob(1, 0, 100, 100, 100, 100);
  JobConf jconf = new JobConf(conf);
  slpJob.submit();
  RunningJob rJob =
      cluster.getJTClient().getClient().getJob(
          org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
  taskController = conf.get(TTConfig.TT_TASK_CONTROLLER);
  // get the job info so we can get the env variables from the daemon.
  // Now wait for the task to be in the running state, only then the
  // directories will be created
  JobInfo info = wovenClient.getJobInfo(rJob.getID());
  Assert.assertNotNull("JobInfo is null", info);
  JobID id = rJob.getID();
  while (info.runningMaps() != 1) {
    Thread.sleep(1000);
    info = wovenClient.getJobInfo(id);
  }
  TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
  for (TaskInfo tInfo : myTaskInfos) {
    if (!tInfo.isSetupOrCleanup()) {
      String[] taskTrackers = tInfo.getTaskTrackers();
      for (String taskTracker : taskTrackers) {
        TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
        TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
        Assert.assertNotNull("TTClient instance is null", ttCli);
        TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
        while (ttTaskInfo.getTaskStatus().getRunState() != TaskStatus.State.RUNNING) {
          Thread.sleep(100);
          ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        }
        testPermissionWithTaskController(ttCli, conf, info);
        FinishTaskControlAction action =
            new FinishTaskControlAction(TaskID.downgrade(tInfo.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
  JobInfo jInfo = wovenClient.getJobInfo(id);
  jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  while (!jInfo.getStatus().isJobComplete()) {
    Thread.sleep(100);
    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 55, Source: TestFileOwner.java

Example 11: testFailedTaskJobStatus

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
/**
 * Verifies the running job status (whether it succeeds or not)
 * after failing some of its tasks.
 *
 * @throws ClassNotFoundException
 */
@Test
public void testFailedTaskJobStatus()
    throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = new Configuration(cluster.getConf());
  TaskInfo taskInfo = null;
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job slpJob = job.createJob(3, 1, 4000, 4000, 100, 100);
  JobConf jobConf = new JobConf(conf);
  jobConf.setMaxMapAttempts(20);
  jobConf.setMaxReduceAttempts(20);
  slpJob.submit();
  RunningJob runJob =
      jobClient.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
          .getJobID()));
  JobID id = runJob.getID();
  JobInfo jInfo = remoteJTClient.getJobInfo(id);
  int counter = 0;
  while (counter < 60) {
    if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
      break;
    } else {
      UtilsForTests.waitFor(1000);
      jInfo = remoteJTClient.getJobInfo(id);
    }
    counter++;
  }
  Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

  TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup()) {
      taskInfo = taskinfo;
    }
  }

  counter = 0;
  taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
  while (counter < 60) {
    if (taskInfo.getTaskStatus().length > 0) {
      if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
        break;
      }
    }
    UtilsForTests.waitFor(1000);
    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    counter++;
  }
  Assert.assertTrue("Task has not been started for 1 min.", counter != 60);

  NetworkedJob networkJob =
      new JobClient.NetworkedJob(jInfo.getStatus(), jobClient.cluster);
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, false);

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = remoteJTClient.getJobInfo(id);
  }

  Assert.assertEquals(
      "JobStatus", jInfo.getStatus().getRunState(), JobStatus.SUCCEEDED);
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 71, Source: TestTaskKilling.java

Example 12: testJobTrackerIntegration

import org.apache.hadoop.mapreduce.SleepJob; // import the dependent package/class
/**
 * Test case which checks {@link JobTracker} and {@link CapacityTaskScheduler}.
 * <p/>
 * Submits 2 jobs to two different capacity scheduler queues
 * and checks whether both jobs complete successfully.
 *
 * @throws Exception
 */

public void testJobTrackerIntegration() throws Exception {

  Properties schedulerProps = new Properties();
  String[] queues = new String[]{"Q1", "Q2"};
  Job jobs[] = new Job[2];

  Properties clusterProps = new Properties();
  clusterProps.put(TTConfig.TT_MAP_SLOTS, String.valueOf(2));
  clusterProps.put(TTConfig.TT_REDUCE_SLOTS, String.valueOf(2));
  clusterProps.put("mapred.queue.names", queues[0] + "," + queues[1]);
  clusterProps.put(JTConfig.JT_PERSIST_JOBSTATUS, "false");
  startCluster(2, clusterProps, schedulerProps);
  CapacityTaskScheduler scheduler = (CapacityTaskScheduler) getJobTracker()
    .getTaskScheduler();

  AbstractQueue root = scheduler.getRoot();

  for(AbstractQueue q : root.getChildren()) {
    q.getQueueSchedulingContext().setCapacityPercent(50);
    q.getQueueSchedulingContext().setUlMin(100);
  }

  LOG.info("WE CREATED THE QUEUES TEST 2");
 // scheduler.taskTrackerManager.getQueueManager().setQueues(qs);
 // scheduler.start();

  JobConf conf = getJobConf();
  conf.setSpeculativeExecution(false);
  conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false");
  conf.setNumTasksToExecutePerJvm(-1);
  conf.setQueueName(queues[0]);
  SleepJob sleepJob1 = new SleepJob();
  sleepJob1.setConf(conf);
  jobs[0] = sleepJob1.createJob(1, 1, 1, 1, 1, 1);
  jobs[0].submit();

  JobConf conf2 = getJobConf();
  conf2.setSpeculativeExecution(false);
  conf2.setNumTasksToExecutePerJvm(-1);
  conf2.setQueueName(queues[1]);
  SleepJob sleepJob2 = new SleepJob();
  sleepJob2.setConf(conf2);
  jobs[1] = sleepJob2.createJob(3, 3, 5, 3, 5, 3);
  jobs[0].waitForCompletion(false);
  jobs[1].waitForCompletion(false);
  assertTrue(
    "Sleep job submitted to queue 1 is not successful", jobs[0]
      .isSuccessful());
  assertTrue(
    "Sleep job submitted to queue 2 is not successful", jobs[1]
      .isSuccessful());
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 65, Source: TestCapacitySchedulerWithJobTracker.java


Note: The org.apache.hadoop.mapreduce.SleepJob.createJob method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Before distributing or using the code, please consult the license of the corresponding project; do not reproduce this article without permission.