

Java SleepJob Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.mapreduce.SleepJob. If you are wondering what the SleepJob class is for, how to use it, or what it looks like in real code, the curated examples below should help.


The SleepJob class belongs to the org.apache.hadoop.mapreduce package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
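
Before diving into the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the most common way these tests drive SleepJob: running it as a Tool with the -m/-r/-mt/-rt flags (number of maps, number of reduces, map sleep time in ms, reduce sleep time in ms). SleepJob is a MapReduce job whose tasks simply sleep, which makes it convenient for exercising job-submission paths; note that it lives in Hadoop's MapReduce test sources, so it may not be on a normal application classpath. The class name SleepJobSmokeTest is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.util.ToolRunner;

public class SleepJobSmokeTest {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // 1 map and 1 reduce, each sleeping for 1 ms -- the same flags the
    // tests below pass to ToolRunner.run(conf, new SleepJob(), args).
    String[] jobArgs = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    int res = ToolRunner.run(conf, new SleepJob(), jobArgs);
    System.exit(res);
  }
}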

Example 1: testLocalJobLibjarsOption

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
/**
 * test the local job submission options of
 * -jt local -libjars
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestLocalJobSubmission.java

Example 2: createJob

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
public Job createJob() 
throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setNumReduceTasks(1);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: CredentialsTestJob.java

Example 3: testTokenCacheFile

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
/**
 * run a distributed job with -tokenCacheFile option parameter and
 * verify that no exception happens.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // provide namenodes names for the job to get the delegation tokens for
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);

  // using argument to pass the file name
  final String[] args = {
      "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
      };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestBinaryTokenFile.java

Example 4: testJobWithNonNormalizedCapabilities

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
/**
 * To ensure nothing broken after we removed normalization 
 * from the MRAM side
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestMRAMWithNonNormalizedCapabilities.java
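
The call sleepJob.createJob(3, 2, 1000, 1, 500, 1) above, and the similar calls in the later examples, takes six arguments. The sketch below spells out one plausible reading of them; the parameter names are assumptions inferred from how the tests vary the values and should be checked against the SleepJob source of your Hadoop version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.SleepJob;

public class SleepJobCreateJobSketch {
  public static void main(String[] args) throws Exception {
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(new Configuration());
    // Assumed parameter order: numMapper, numReducer, mapSleepTime,
    // mapSleepCount, reduceSleepTime, reduceSleepCount.
    Job job = sleepJob.createJob(
        3,     // numMapper: 3 map tasks (assumed)
        2,     // numReducer: 2 reduce tasks (assumed)
        1000,  // mapSleepTime: each map sleeps ~1000 ms (assumed)
        1,     // mapSleepCount: sleep "records" per map (assumed)
        500,   // reduceSleepTime: each reduce sleeps ~500 ms (assumed)
        1);    // reduceSleepCount: sleep "records" per reduce (assumed)
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}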

Example 5: testLocalJobLibjarsOption

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
/**
 * test the local job submission options of
 * -jt local -libjars
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source: TestLocalJobSubmission.java

Example 6: testLocalJobEncryptedIntermediateData

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
/**
 * test the local job submission with
 * intermediate data encryption enabled.
 * @throws IOException
 */
@Test
public void testLocalJobEncryptedIntermediateData() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  final String[] args = {
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: TestLocalJobSubmission.java

Example 7: runJobAndVerifyFailure

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
private void runJobAndVerifyFailure(JobConf jobConf, long memForMapTasks,
    long memForReduceTasks, String expectedMsg)
    throws Exception,
    IOException {
  String[] args = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" };
  boolean throwsException = false;
  String msg = null;
  try {
    ToolRunner.run(jobConf, new SleepJob(), args);
  } catch (RemoteException re) {
    throwsException = true;
    msg = re.unwrapRemoteException().getMessage();
  }
  assertTrue(throwsException);
  assertNotNull(msg);

  String overallExpectedMsg =
      "(" + memForMapTasks + " memForMapTasks " + memForReduceTasks
          + " memForReduceTasks): " + expectedMsg;
  assertTrue("Observed message - " + msg
      + " - doesn't contain expected message - " + overallExpectedMsg, msg
      .contains(overallExpectedMsg));
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 24, Source: TestSubmitJob.java

Example 8: testTimeoutStackTrace

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
/** Ensure that SIGQUIT can be properly sent by the LinuxTaskController
 * if a task times out.
 */
public void testTimeoutStackTrace() throws Exception {
  if (!shouldRun()) {
    return;
  }

  // Run a job that should timeout and trigger a SIGQUIT.
  startCluster();
  jobOwner.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      JobConf conf = getClusterConf();
      conf.setInt(JobContext.TASK_TIMEOUT, 10000);
      conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(conf);
      Job job = sleepJob.createJob(1, 0, 30000, 1, 0, 0);
      job.setMaxMapAttempts(1);
      int prevNumSigQuits = MyLinuxTaskController.attemptedSigQuits;
      job.waitForCompletion(true);
      assertTrue("Did not detect a new SIGQUIT!",
          prevNumSigQuits < MyLinuxTaskController.attemptedSigQuits);
      assertEquals("A SIGQUIT attempt failed!", 0,
          MyLinuxTaskController.failedSigQuits);
      return null;
    }
  });
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 30, Source: TestJobExecutionAsDifferentUser.java

Example 9: runSleepJob

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
private JobID runSleepJob(JobConf conf) throws Exception {
  SleepJob sleep = new SleepJob();
  sleep.setConf(conf);
  Job job = sleep.createJob(1, 10, 1000, 1, 10000, 1);
  job.waitForCompletion(true);
  return job.getJobID();
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 8, Source: TestJobDirCleanup.java

Example 10: testJobSubmission

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
@Test
public void testJobSubmission() throws Exception {
  Configuration conf = new Configuration(cluster.getConf());
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
  rJob = cluster.getJTClient().submitAndVerifyJob(rJob);
  cluster.getJTClient().verifyJobHistory(rJob.getJobID());
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 10, Source: TestCluster.java

Example 11: testFailingJobInitalization

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
/**
 * Test case which checks if the jobs which
 * fail initialization are removed from the
 * {@link CapacityTaskScheduler} waiting queue.
 *
 * @throws Exception
 */
public void testFailingJobInitalization() throws Exception {
  Properties schedulerProps = new Properties();
  Properties clusterProps = new Properties();
  clusterProps.put("mapred.queue.names","default");
  clusterProps.put(TTConfig.TT_MAP_SLOTS, String.valueOf(1));
  clusterProps.put(TTConfig.TT_REDUCE_SLOTS, String.valueOf(1));
  clusterProps.put(JTConfig.JT_TASKS_PER_JOB, String.valueOf(1));
  clusterProps.put(JTConfig.JT_PERSIST_JOBSTATUS, "false");
  // cluster capacity 1 maps, 1 reduces
  startCluster(1, clusterProps, schedulerProps);
  CapacityTaskScheduler scheduler = (CapacityTaskScheduler) getJobTracker()
    .getTaskScheduler();

  AbstractQueue root = scheduler.getRoot();
  root.getChildren().get(0).getQueueSchedulingContext().setCapacityPercent(100);

  JobConf conf = getJobConf();
  conf.setSpeculativeExecution(false);
  conf.setNumTasksToExecutePerJvm(-1);
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  Job job = sleepJob.createJob(3, 3, 1, 1, 1, 1);
  job.waitForCompletion(false);
  assertFalse(
    "The submitted job successfully completed",
    job.isSuccessful());

  JobQueuesManager mgr = scheduler.jobQueuesManager;
  assertEquals(
    "Failed job present in Waiting queue", 0, mgr
      .getJobQueue("default").getWaitingJobCount());
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 40, Source: TestCapacitySchedulerWithJobTracker.java

Example 12: testSleepJobInternal

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
private void testSleepJobInternal(boolean useRemoteJar) throws Exception {
  LOG.info("\n\n\nStarting testSleepJob: useRemoteJar=" + useRemoteJar);

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
             + " not found. Not running test.");
    return;
  }

  Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode applied iff framework == local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");	
  
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);
 
  // job with 3 maps (10s) and numReduces reduces (5s), 1 "record" each:
  Job job = sleepJob.createJob(3, numSleepReducers, 10000, 1, 5000, 1);

  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  if (useRemoteJar) {
    final Path localJar = new Path(
        ClassUtil.findContainingJar(SleepJob.class));
    ConfigUtil.addLink(job.getConfiguration(), "/jobjars",
        localFs.makeQualified(localJar.getParent()).toUri());
    job.setJar("viewfs:///jobjars/" + localJar.getName());
  } else {
    job.setJarByClass(SleepJob.class);
  }
  job.setMaxMapAttempts(1); // speed up failures
  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Assert.assertTrue("Tracking URL was " + trackingUrl +
                    " but didn't Match Job ID " + jobId ,
        trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  verifySleepJobCounters(job);
  verifyTaskProgress(job);
  
  // TODO later:  add explicit "isUber()" checks of some sort (extend
  // JobStatus?)--compare against MRJobConfig.JOB_UBERTASK_ENABLE value
}
 
Developer: naver, Project: hadoop, Lines: 46, Source: TestMRJobs.java

Example 13: testJobClassloader

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
private void testJobClassloader(boolean useCustomClasses) throws IOException,
    InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobClassloader()"
      + " useCustomClasses=" + useCustomClasses);

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
             + " not found. Not running test.");
    return;
  }
  final Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode applied iff framework == local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
  sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
  if (useCustomClasses) {
    // to test AM loading user classes such as output format class, we want
    // to blacklist them from the system classes (they need to be prepended
    // as the first match wins)
    String systemClasses = ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
    // exclude the custom classes from system classes
    systemClasses = "-" + CustomOutputFormat.class.getName() + ",-" +
        CustomSpeculator.class.getName() + "," +
        systemClasses;
    sleepConf.set(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES,
        systemClasses);
  }
  sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
  sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
  final SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);
  final Job job = sleepJob.createJob(1, 1, 10, 1, 10, 1);
  job.setMapperClass(ConfVerificationMapper.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.setJarByClass(SleepJob.class);
  job.setMaxMapAttempts(1); // speed up failures
  if (useCustomClasses) {
    // set custom output format class and speculator class
    job.setOutputFormatClass(CustomOutputFormat.class);
    final Configuration jobConf = job.getConfiguration();
    jobConf.setClass(MRJobConfig.MR_AM_JOB_SPECULATOR, CustomSpeculator.class,
        Speculator.class);
    // speculation needs to be enabled for the speculator to be loaded
    jobConf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
  }
  job.submit();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue("Job status: " + job.getStatus().getFailureInfo(),
      succeeded);
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestMRJobs.java

Example 14: testSleepJobWithSecurityOn

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
public void testSleepJobWithSecurityOn() throws IOException,
    InterruptedException, ClassNotFoundException {

  LOG.info("\n\n\nStarting testSleepJobWithSecurityOn().");

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    return;
  }

  mrCluster.getConfig().set(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  mrCluster.getConfig().set(YarnConfiguration.RM_KEYTAB, "/etc/krb5.keytab");
  mrCluster.getConfig().set(YarnConfiguration.NM_KEYTAB, "/etc/krb5.keytab");
  mrCluster.getConfig().set(YarnConfiguration.RM_PRINCIPAL,
      "rm/[email protected]");
  mrCluster.getConfig().set(YarnConfiguration.NM_PRINCIPAL,
      "nm/[email protected]");
  UserGroupInformation.setConfiguration(mrCluster.getConfig());

  // Keep it in here instead of after RM/NM as multiple user logins happen in
  // the same JVM.
  UserGroupInformation user = UserGroupInformation.getCurrentUser();

  LOG.info("User name is " + user.getUserName());
  for (Token<? extends TokenIdentifier> str : user.getTokens()) {
    LOG.info("Token is " + str.encodeToUrlString());
  }
  user.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {  
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(mrCluster.getConfig());
      Job job = sleepJob.createJob(3, 0, 10000, 1, 0, 0);
      // //Job with reduces
      // Job job = sleepJob.createJob(3, 2, 10000, 1, 10000, 1);
      job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
      job.submit();
      String trackingUrl = job.getTrackingURL();
      String jobId = job.getJobID().toString();
      job.waitForCompletion(true);
      Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
      Assert.assertTrue("Tracking URL was " + trackingUrl +
                        " but didn't Match Job ID " + jobId ,
        trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
      return null;
    }
  });

  // TODO later:  add explicit "isUber()" checks of some sort
}
 
Developer: naver, Project: hadoop, Lines: 52, Source: TestMRJobs.java

Example 15: testJobHistoryData

import org.apache.hadoop.mapreduce.SleepJob; // import the required package/class
@Test (timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException,
    AvroRemoteException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(mrCluster.getConfig());
  // Job with 3 maps and 2 reduces
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.waitForCompletion(true);
  Counters counterMR = job.getCounters();
  JobId jobId = TypeConverter.toYarn(job.getJobID());
  ApplicationId appID = jobId.getAppId();
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;

    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
        .getState())) {
      break;
    }

    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
    .getRMContext().getRMApps().get(appID).getState());
  Counters counterHS = job.getCounters();
  //TODO the Assert below worked. need to check
  //Should we compare each field or convert to V2 counter and compare
  LOG.info("CounterHS " + counterHS);
  LOG.info("CounterMR " + counterMR);
  Assert.assertEquals(counterHS, counterMR);
  
  HSClientProtocol historyClient = instantiateHistoryProxy();
  GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
  gjReq.setJobId(jobId);
  JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
  verifyJobReport(jobReport, jobId);
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestMRJobsWithHistoryService.java


Note: The org.apache.hadoop.mapreduce.SleepJob class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please refer to the corresponding project's License before distributing or using it. Do not reproduce without permission.